repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
project-em/ns3-sentiment | stance/topic_scoring.py | 1 | 1539 | from gensim.models import KeyedVectors
from nltk import pos_tag
from nltk import word_tokenize
import numpy as np
word2vec_filepath = "./data/GoogleNews-vectors-negative300.bin"
# word2vec similarity between the topic and the nouns of the candidate sentence
def nouns_sim(word2vec, sentences, tagged_sentences, tagged_topic):
sim_scores = np.zeros((len(sentences)))
for i, tagged_sentence in enumerate(tagged_sentences):
topic_nouns = {word for word, pos in tagged_topic
if pos.startswith("NN") and word in word2vec.vocab}
sentence_nouns = {word for word, pos in tagged_sentence
if pos.startswith("NN") and word in word2vec.vocab}
if len(sentence_nouns) == 0 or len(topic_nouns) == 0:
sim_scores[i] = 0
continue
similarity = word2vec.n_similarity(topic_nouns, sentence_nouns)
print("topic nouns are: ", topic_nouns)
print("sentence nouns are: ", sentence_nouns)
print("similarity is: ", similarity)
sim_scores[i] = similarity
return sim_scores
def compute_sim(sentences, topic, word2vec):
tagged_sentences = [pos_tag(word_tokenize(sentence)) for sentence in sentences]
tagged_topic = pos_tag(word_tokenize(topic))
return nouns_sim(word2vec, sentences, tagged_sentences, tagged_topic)
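# Hedged usage sketch (not part of the original module): shows how
# compute_sim and load_word2vec (defined below; resolved at call time) fit
# together. The topic and sentences are made-up examples, and the call
# assumes the GoogleNews binary exists at word2vec_filepath.
def demo_compute_sim():
    word2vec = load_word2vec()
    sentences = ["The senator proposed a new climate bill.",
                 "I had pasta for lunch."]
    # One score per sentence: n_similarity between topic nouns and
    # sentence nouns, or 0.0 when either side has no known nouns.
    scores = compute_sim(sentences, "climate change policy", word2vec)
    print("similarity scores:", scores)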
def load_word2vec():
print("loading word2vec")
word2vec = KeyedVectors.load_word2vec_format(word2vec_filepath, binary=True)
print("finished loading word2vec")
return word2vec | mit |
jirikuncar/invenio | invenio/modules/records/views.py | 2 | 12269 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""WebSearch Flask Blueprint."""
import cStringIO
from functools import wraps
from flask import g, render_template, request, flash, redirect, url_for, \
current_app, abort, Blueprint, send_file
from flask_breadcrumbs import default_breadcrumb_root
from flask_login import current_user
from flask_menu import register_menu
from invenio.base.decorators import wash_arguments
from invenio.base.globals import cfg
from invenio.base.i18n import _
from invenio.base.signals import pre_template_render
from invenio.config import CFG_SITE_RECORD
from invenio.ext.template.context_processor import \
register_template_context_processor
from invenio.modules.collections.models import Collection
from invenio.modules.search.signals import record_viewed
from invenio.utils import apache
from .api import get_record
from .models import Record as Bibrec
from .utils import references_nb_counts, citations_nb_counts, \
visible_collection_tabs
blueprint = Blueprint('record', __name__, url_prefix="/" + CFG_SITE_RECORD,
static_url_path='/record', template_folder='templates',
static_folder='static')
default_breadcrumb_root(blueprint, '.')
def request_record(f):
"""Perform standard operation to check record availability for user."""
@wraps(f)
def decorated(recid, *args, **kwargs):
from invenio.modules.access.mailcookie import \
mail_cookie_create_authorize_action
from invenio.modules.access.local_config import VIEWRESTRCOLL
from invenio.legacy.search_engine import \
guess_primary_collection_of_a_record, \
check_user_can_view_record
# ensure recid to be integer
recid = int(recid)
g.collection = collection = Collection.query.filter(
Collection.name == guess_primary_collection_of_a_record(recid)).\
one()
(auth_code, auth_msg) = check_user_can_view_record(current_user, recid)
# only superadmins can use verbose parameter for obtaining debug
# information
if not current_user.is_super_admin and 'verbose' in kwargs:
kwargs['verbose'] = 0
if auth_code and current_user.is_guest:
cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {
'collection': g.collection.name})
url_args = {'action': cookie, 'ln': g.ln, 'referer': request.url}
flash(_("Authorization failure"), 'error')
return redirect(url_for('webaccount.login', **url_args))
elif auth_code:
flash(auth_msg, 'error')
abort(apache.HTTP_UNAUTHORIZED)
from invenio.legacy.search_engine import record_exists, \
get_merged_recid
# check if the current record has been deleted
# and has been merged, case in which the deleted record
# will be redirect to the new one
record_status = record_exists(recid)
merged_recid = get_merged_recid(recid)
if record_status == -1 and merged_recid:
return redirect(url_for('record.metadata', recid=merged_recid))
elif record_status == -1:
abort(apache.HTTP_GONE) # The record is gone!
g.bibrec = Bibrec.query.get(recid)
record = get_record(recid)
if record is None:
return render_template('404.html')
title = record.get(cfg.get('RECORDS_BREADCRUMB_TITLE_KEY'), '')
tabs = []
if cfg.get('CFG_WEBLINKBACK_TRACKBACK_ENABLED'):
@register_template_context_processor
def trackback_context():
from invenio.legacy.weblinkback.templates import \
get_trackback_auto_discovery_tag
return {'headerLinkbackTrackbackLink':
get_trackback_auto_discovery_tag(recid)}
def _format_record(recid, of='hd', user_info=current_user, *args,
**kwargs):
from invenio.modules.formatter import format_record
return format_record(recid, of, user_info=user_info, *args,
**kwargs)
@register_template_context_processor
def record_context():
from invenio.modules.comments.api import get_mini_reviews
return dict(recid=recid,
record=record,
tabs=tabs,
title=title,
get_mini_reviews=get_mini_reviews,
collection=collection,
format_record=_format_record
)
pre_template_render.send(
"%s.%s" % (blueprint.name, f.__name__),
recid=recid,
)
return f(recid, *args, **kwargs)
return decorated
@blueprint.route('/<int:recid>/metadata', methods=['GET', 'POST'])
@blueprint.route('/<int:recid>/', methods=['GET', 'POST'])
@blueprint.route('/<int:recid>', methods=['GET', 'POST'])
@blueprint.route('/<int:recid>/export/<of>', methods=['GET', 'POST'])
@wash_arguments({'of': (unicode, 'hd'), 'ot': (unicode, None)})
@request_record
@register_menu(blueprint, 'record.metadata', _('Information'), order=1,
endpoint_arguments_constructor=lambda:
dict(recid=request.view_args.get('recid')),
visible_when=visible_collection_tabs('metadata'))
def metadata(recid, of='hd', ot=None):
"""Display formated record metadata."""
from invenio.legacy.bibrank.downloads_similarity import \
register_page_view_event
from invenio.modules.formatter import get_output_format_content_type
register_page_view_event(recid, current_user.get_id(),
str(request.remote_addr))
if get_output_format_content_type(of) != 'text/html':
from invenio.modules.search.views.search import \
response_formated_records
return response_formated_records([recid], g.collection, of, qid=None)
# Send the signal 'document viewed'
record_viewed.send(
current_app._get_current_object(),
recid=recid,
id_user=current_user.get_id(),
request=request)
return render_template('records/metadata.html', of=of, ot=ot)
@blueprint.route('/<int:recid>/references', methods=['GET', 'POST'])
@request_record
@register_menu(blueprint, 'record.references', _('References'), order=2,
visible_when=visible_collection_tabs('references'),
endpoint_arguments_constructor=lambda:
dict(recid=request.view_args.get('recid')),
count=references_nb_counts)
def references(recid):
"""Display references."""
return render_template('records/references.html')
@blueprint.route('/<int:recid>/files', methods=['GET', 'POST'])
@request_record
@register_menu(blueprint, 'record.files', _('Files'), order=8,
endpoint_arguments_constructor=lambda:
dict(recid=request.view_args.get('recid')),
visible_when=visible_collection_tabs('files'))
def files(recid):
"""Return overview of attached files."""
def get_files():
from invenio.legacy.bibdocfile.api import BibRecDocs
for bibdoc in BibRecDocs(recid).list_bibdocs():
for file in bibdoc.list_all_files():
yield file.get_url()
return render_template('records/files.html', files=list(get_files()))
@blueprint.route('/<int:recid>/files/<path:filename>', methods=['GET'])
@request_record
def file(recid, filename):
"""Serve attached documents."""
from invenio.modules.documents import api
record = get_record(recid)
duuids = [uuid for (k, uuid) in record.get('_documents', [])
if k == filename]
error = 404
for duuid in duuids:
document = api.Document.get_document(duuid)
if not document.is_authorized(current_user):
current_app.logger.info(
"Unauthorized access to /{recid}/files/{filename} "
"({document}) by {current_user}".format(
recid=recid, filename=filename, document=document,
current_user=current_user))
error = 401
continue
# TODO add logging of downloads
if document.get('linked', False):
if document.get('uri').startswith('http://') or \
document.get('uri').startswith('https://'):
return redirect(document.get('uri'))
# FIXME create better streaming support
file_ = cStringIO.StringIO(document.open('rb').read())
file_.seek(0)
return send_file(file_, mimetype='application/octet-stream',
attachment_filename=filename)
return send_file(document['uri'])
abort(error)
@blueprint.route('/<int:recid>/citations', methods=['GET', 'POST'])
@request_record
@register_menu(blueprint, 'record.citations', _('Citations'), order=3,
visible_when=visible_collection_tabs('citations'),
endpoint_arguments_constructor=lambda:
dict(recid=request.view_args.get('recid')),
count=citations_nb_counts)
def citations(recid):
"""Display citations."""
from invenio.legacy.bibrank.citation_searcher import calculate_cited_by_list,\
get_self_cited_by, calculate_co_cited_with_list
citations = dict(
citinglist=calculate_cited_by_list(recid),
selfcited=get_self_cited_by(recid),
co_cited=calculate_co_cited_with_list(recid)
)
return render_template('records/citations.html',
citations=citations)
@blueprint.route('/<int:recid>/keywords', methods=['GET', 'POST'])
@request_record
@register_menu(blueprint, 'record.keywords', _('Keywords'), order=4,
endpoint_arguments_constructor=lambda:
dict(recid=request.view_args.get('recid')),
visible_when=visible_collection_tabs('keywords'))
def keywords(recid):
"""Return keywords overview."""
from invenio.legacy.bibclassify.webinterface import record_get_keywords
found, keywords, record = record_get_keywords(recid)
return render_template('records/keywords.html',
found=found,
keywords=keywords)
@blueprint.route('/<int:recid>/usage', methods=['GET', 'POST'])
@request_record
@register_menu(blueprint, 'record.usage', _('Usage statistics'), order=7,
endpoint_arguments_constructor=lambda:
dict(recid=request.view_args.get('recid')),
visible_when=visible_collection_tabs('usage'))
def usage(recid):
"""Return usage statistics."""
from invenio.legacy.bibrank.downloads_similarity import \
calculate_reading_similarity_list
from invenio.legacy.bibrank.downloads_grapher import \
create_download_history_graph_and_box
viewsimilarity = calculate_reading_similarity_list(recid, "pageviews")
downloadsimilarity = calculate_reading_similarity_list(recid, "downloads")
downloadgraph = create_download_history_graph_and_box(recid)
return render_template('records/usage.html',
viewsimilarity=viewsimilarity,
downloadsimilarity=downloadsimilarity,
downloadgraph=downloadgraph)
@blueprint.route('/', methods=['GET', 'POST'])
def no_recid():
"""Redirect to homepage."""
return redirect("/")
| gpl-2.0 |
telwertowski/QGIS | tests/src/python/test_qgsserver_plugins.py | 13 | 9203 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsServer plugins and filters.
From build dir, run: ctest -R PyQgsServerPlugins -V
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Alessandro Pasotti'
__date__ = '22/04/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
import os
from qgis.server import QgsServer
from qgis.core import QgsMessageLog
from qgis.testing import unittest
from utilities import unitTestDataPath
from test_qgsserver import QgsServerTestBase
import osgeo.gdal # NOQA
# Strip path and content length because path may vary
RE_STRIP_UNCHECKABLE = b'MAP=[^"]+|Content-Length: \d+'
RE_ATTRIBUTES = b'[^>\s]+=[^>\s]+'
class TestQgsServerPlugins(QgsServerTestBase):
def setUp(self):
"""Create the server instance"""
self.testdata_path = unitTestDataPath('qgis_server') + '/'
d = unitTestDataPath('qgis_server_accesscontrol') + '/'
self.projectPath = os.path.join(d, "project.qgs")
# Clean env just to be sure
env_vars = ['QUERY_STRING', 'QGIS_PROJECT_FILE']
for ev in env_vars:
try:
del os.environ[ev]
except KeyError:
pass
self.server = QgsServer()
def test_pluginfilters(self):
"""Test python plugins filters"""
try:
from qgis.server import QgsServerFilter
except ImportError:
print("QGIS Server plugins are not compiled. Skipping test")
return
class SimpleHelloFilter(QgsServerFilter):
def requestReady(self):
QgsMessageLog.logMessage("SimpleHelloFilter.requestReady")
def sendResponse(self):
QgsMessageLog.logMessage("SimpleHelloFilter.sendResponse")
def responseComplete(self):
request = self.serverInterface().requestHandler()
params = request.parameterMap()
QgsMessageLog.logMessage("SimpleHelloFilter.responseComplete")
if params.get('SERVICE', '').upper() == 'SIMPLE':
request.clear()
request.setResponseHeader('Content-type', 'text/plain')
request.appendBody('Hello from SimpleServer!'.encode('utf-8'))
serverIface = self.server.serverInterface()
filter = SimpleHelloFilter(serverIface)
serverIface.registerFilter(filter, 100)
# Get registered filters
self.assertEqual(filter, serverIface.filters()[100][0])
# global to be modified inside plugin filters
globals()['status_code'] = 0
# body to be checked inside plugin filters
globals()['body2'] = None
# headers to be checked inside plugin filters
globals()['headers2'] = None
# Register some more filters
class Filter1(QgsServerFilter):
def responseComplete(self):
request = self.serverInterface().requestHandler()
params = request.parameterMap()
if params.get('SERVICE', '').upper() == 'SIMPLE':
request.appendBody('Hello from Filter1!'.encode('utf-8'))
class Filter2(QgsServerFilter):
def responseComplete(self):
request = self.serverInterface().requestHandler()
params = request.parameterMap()
if params.get('SERVICE', '').upper() == 'SIMPLE':
request.appendBody('Hello from Filter2!'.encode('utf-8'))
class Filter3(QgsServerFilter):
"""Test get and set status code"""
def responseComplete(self):
global status_code
request = self.serverInterface().requestHandler()
request.setStatusCode(999)
status_code = request.statusCode()
class Filter4(QgsServerFilter):
"""Body getter"""
def responseComplete(self):
global body2
request = self.serverInterface().requestHandler()
body2 = request.body()
class Filter5(QgsServerFilter):
"""Body setter, clear body, keep headers"""
def responseComplete(self):
global headers2
request = self.serverInterface().requestHandler()
request.clearBody()
headers2 = request.responseHeaders()
request.appendBody('new body, new life!'.encode('utf-8'))
filter1 = Filter1(serverIface)
filter2 = Filter2(serverIface)
filter3 = Filter3(serverIface)
filter4 = Filter4(serverIface)
serverIface.registerFilter(filter1, 101)
serverIface.registerFilter(filter2, 200)
serverIface.registerFilter(filter2, 100)
serverIface.registerFilter(filter3, 300)
serverIface.registerFilter(filter4, 400)
self.assertTrue(filter2 in serverIface.filters()[100])
self.assertEqual(filter1, serverIface.filters()[101][0])
self.assertEqual(filter2, serverIface.filters()[200][0])
header, body = [_v for _v in self._execute_request('?service=simple')]
response = header + body
expected = b'Content-Length: 62\nContent-type: text/plain\n\nHello from SimpleServer!Hello from Filter1!Hello from Filter2!'
self.assertEqual(response, expected)
# Check status code
self.assertEqual(status_code, 999)
# Check body getter from filter
self.assertEqual(body2, b'Hello from SimpleServer!Hello from Filter1!Hello from Filter2!')
# Check that the bindings for complex type QgsServerFiltersMap are working
filters = {100: [filter, filter2], 101: [filter1], 200: [filter2]}
serverIface.setFilters(filters)
self.assertTrue(filter in serverIface.filters()[100])
self.assertTrue(filter2 in serverIface.filters()[100])
self.assertEqual(filter1, serverIface.filters()[101][0])
self.assertEqual(filter2, serverIface.filters()[200][0])
header, body = self._execute_request('?service=simple')
response = header + body
expected = b'Content-Length: 62\nContent-type: text/plain\n\nHello from SimpleServer!Hello from Filter1!Hello from Filter2!'
self.assertEqual(response, expected)
# Now, re-run with body setter
filter5 = Filter5(serverIface)
serverIface.registerFilter(filter5, 500)
header, body = self._execute_request('?service=simple')
response = header + body
expected = b'Content-Length: 19\nContent-type: text/plain\n\nnew body, new life!'
self.assertEqual(response, expected)
self.assertEqual(headers2, {'Content-type': 'text/plain'})
def test_configpath(self):
""" Test plugin can read confif path
"""
try:
from qgis.server import QgsServerFilter
from qgis.core import QgsProject
except ImportError:
print("QGIS Server plugins are not compiled. Skipping test")
return
d = unitTestDataPath('qgis_server_accesscontrol') + '/'
self.projectPath = os.path.join(d, "project.qgs")
self.server = QgsServer()
# global to be modified inside plugin filters
globals()['configFilePath2'] = None
class Filter0(QgsServerFilter):
"""Body setter, clear body, keep headers"""
def requestReady(self):
global configFilePath2
configFilePath2 = self.serverInterface().configFilePath()
serverIface = self.server.serverInterface()
serverIface.registerFilter(Filter0(serverIface), 100)
# Test using MAP
self._execute_request('?service=simple&MAP=%s' % self.projectPath)
# Check config file path
self.assertEqual(configFilePath2, self.projectPath)
# Reset result
globals()['configFilePath2'] = None
# Test with project as argument
project = QgsProject()
project.read(self.projectPath)
self._execute_request_project('?service=simple', project=project)
# Check config file path
self.assertEqual(configFilePath2, project.fileName())
def test_exceptions(self):
"""Test that plugin filter Python exceptions can be caught"""
try:
from qgis.server import QgsServerFilter
except ImportError:
print("QGIS Server plugins are not compiled. Skipping test")
return
class FilterBroken(QgsServerFilter):
def responseComplete(self):
raise Exception("There was something very wrong!")
serverIface = self.server.serverInterface()
filter1 = FilterBroken(serverIface)
filters = {100: [filter1]}
serverIface.setFilters(filters)
header, body = self._execute_request('')
self.assertEqual(body, b'Internal Server Error')
serverIface.setFilters({})
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
sumedhasingla/VTK | ThirdParty/Twisted/twisted/protocols/policies.py | 35 | 20735 | # -*- test-case-name: twisted.test.test_policies -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Resource limiting policies.
@seealso: See also L{twisted.protocols.htb} for rate limiting.
"""
from __future__ import division, absolute_import
# system imports
import sys
from zope.interface import directlyProvides, providedBy
# twisted imports
from twisted.internet.protocol import ServerFactory, Protocol, ClientFactory
from twisted.internet import error
from twisted.internet.interfaces import ILoggingContext
from twisted.python import log
def _wrappedLogPrefix(wrapper, wrapped):
"""
Compute a log prefix for a wrapper and the object it wraps.
@rtype: C{str}
"""
if ILoggingContext.providedBy(wrapped):
logPrefix = wrapped.logPrefix()
else:
logPrefix = wrapped.__class__.__name__
return "%s (%s)" % (logPrefix, wrapper.__class__.__name__)
class ProtocolWrapper(Protocol):
"""
Wraps protocol instances and acts as their transport as well.
@ivar wrappedProtocol: An L{IProtocol<twisted.internet.interfaces.IProtocol>}
provider to which L{IProtocol<twisted.internet.interfaces.IProtocol>}
method calls onto this L{ProtocolWrapper} will be proxied.
@ivar factory: The L{WrappingFactory} which created this
L{ProtocolWrapper}.
"""
disconnecting = 0
def __init__(self, factory, wrappedProtocol):
self.wrappedProtocol = wrappedProtocol
self.factory = factory
def logPrefix(self):
"""
Use a customized log prefix mentioning both the wrapped protocol and
the current one.
"""
return _wrappedLogPrefix(self, self.wrappedProtocol)
def makeConnection(self, transport):
"""
When a connection is made, register this wrapper with its factory,
save the real transport, and connect the wrapped protocol to this
L{ProtocolWrapper} to intercept any transport calls it makes.
"""
directlyProvides(self, providedBy(transport))
Protocol.makeConnection(self, transport)
self.factory.registerProtocol(self)
self.wrappedProtocol.makeConnection(self)
# Transport relaying
def write(self, data):
self.transport.write(data)
def writeSequence(self, data):
self.transport.writeSequence(data)
def loseConnection(self):
self.disconnecting = 1
self.transport.loseConnection()
def getPeer(self):
return self.transport.getPeer()
def getHost(self):
return self.transport.getHost()
def registerProducer(self, producer, streaming):
self.transport.registerProducer(producer, streaming)
def unregisterProducer(self):
self.transport.unregisterProducer()
def stopConsuming(self):
self.transport.stopConsuming()
def __getattr__(self, name):
return getattr(self.transport, name)
# Protocol relaying
def dataReceived(self, data):
self.wrappedProtocol.dataReceived(data)
def connectionLost(self, reason):
self.factory.unregisterProtocol(self)
self.wrappedProtocol.connectionLost(reason)
class WrappingFactory(ClientFactory):
"""
Wraps a factory and its protocols, and keeps track of them.
"""
protocol = ProtocolWrapper
def __init__(self, wrappedFactory):
self.wrappedFactory = wrappedFactory
self.protocols = {}
def logPrefix(self):
"""
Generate a log prefix mentioning both the wrapped factory and this one.
"""
return _wrappedLogPrefix(self, self.wrappedFactory)
def doStart(self):
self.wrappedFactory.doStart()
ClientFactory.doStart(self)
def doStop(self):
self.wrappedFactory.doStop()
ClientFactory.doStop(self)
def startedConnecting(self, connector):
self.wrappedFactory.startedConnecting(connector)
def clientConnectionFailed(self, connector, reason):
self.wrappedFactory.clientConnectionFailed(connector, reason)
def clientConnectionLost(self, connector, reason):
self.wrappedFactory.clientConnectionLost(connector, reason)
def buildProtocol(self, addr):
return self.protocol(self, self.wrappedFactory.buildProtocol(addr))
def registerProtocol(self, p):
"""
Called by protocol to register itself.
"""
self.protocols[p] = 1
def unregisterProtocol(self, p):
"""
Called by protocols when they go away.
"""
del self.protocols[p]
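# Hedged usage sketch (not in the original module): WrappingFactory wraps a
# plain factory so each connection gets a ProtocolWrapper around the real
# protocol. Echo/EchoFactory here are hypothetical stand-ins.
def _example_wrapping_factory():
    from twisted.internet.protocol import Factory
    from twisted.protocols.basic import LineReceiver

    class Echo(LineReceiver):
        def lineReceived(self, line):
            self.sendLine(line)

    class EchoFactory(Factory):
        protocol = Echo

    # The wrapper acts as the transport too, so Echo instances never touch
    # the real transport directly.
    return WrappingFactory(EchoFactory())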
class ThrottlingProtocol(ProtocolWrapper):
"""
Protocol for L{ThrottlingFactory}.
"""
# wrap API for tracking bandwidth
def write(self, data):
self.factory.registerWritten(len(data))
ProtocolWrapper.write(self, data)
def writeSequence(self, seq):
self.factory.registerWritten(sum(map(len, seq)))
ProtocolWrapper.writeSequence(self, seq)
def dataReceived(self, data):
self.factory.registerRead(len(data))
ProtocolWrapper.dataReceived(self, data)
def registerProducer(self, producer, streaming):
self.producer = producer
ProtocolWrapper.registerProducer(self, producer, streaming)
def unregisterProducer(self):
del self.producer
ProtocolWrapper.unregisterProducer(self)
def throttleReads(self):
self.transport.pauseProducing()
def unthrottleReads(self):
self.transport.resumeProducing()
def throttleWrites(self):
if hasattr(self, "producer"):
self.producer.pauseProducing()
def unthrottleWrites(self):
if hasattr(self, "producer"):
self.producer.resumeProducing()
class ThrottlingFactory(WrappingFactory):
"""
Throttles bandwidth and number of connections.
Write bandwidth will only be throttled if there is a producer
registered.
"""
protocol = ThrottlingProtocol
def __init__(self, wrappedFactory, maxConnectionCount=sys.maxsize,
readLimit=None, writeLimit=None):
WrappingFactory.__init__(self, wrappedFactory)
self.connectionCount = 0
self.maxConnectionCount = maxConnectionCount
self.readLimit = readLimit # max bytes we should read per second
self.writeLimit = writeLimit # max bytes we should write per second
self.readThisSecond = 0
self.writtenThisSecond = 0
self.unthrottleReadsID = None
self.checkReadBandwidthID = None
self.unthrottleWritesID = None
self.checkWriteBandwidthID = None
def callLater(self, period, func):
"""
Wrapper around L{reactor.callLater} for test purpose.
"""
from twisted.internet import reactor
return reactor.callLater(period, func)
def registerWritten(self, length):
"""
Called by protocol to tell us more bytes were written.
"""
self.writtenThisSecond += length
def registerRead(self, length):
"""
Called by protocol to tell us more bytes were read.
"""
self.readThisSecond += length
def checkReadBandwidth(self):
"""
Checks if we've passed bandwidth limits.
"""
if self.readThisSecond > self.readLimit:
self.throttleReads()
throttleTime = (float(self.readThisSecond) / self.readLimit) - 1.0
self.unthrottleReadsID = self.callLater(throttleTime,
self.unthrottleReads)
self.readThisSecond = 0
self.checkReadBandwidthID = self.callLater(1, self.checkReadBandwidth)
def checkWriteBandwidth(self):
if self.writtenThisSecond > self.writeLimit:
self.throttleWrites()
throttleTime = (float(self.writtenThisSecond) / self.writeLimit) - 1.0
self.unthrottleWritesID = self.callLater(throttleTime,
self.unthrottleWrites)
# reset for next round
self.writtenThisSecond = 0
self.checkWriteBandwidthID = self.callLater(1, self.checkWriteBandwidth)
def throttleReads(self):
"""
Throttle reads on all protocols.
"""
log.msg("Throttling reads on %s" % self)
for p in self.protocols.keys():
p.throttleReads()
def unthrottleReads(self):
"""
Stop throttling reads on all protocols.
"""
self.unthrottleReadsID = None
log.msg("Stopped throttling reads on %s" % self)
for p in self.protocols.keys():
p.unthrottleReads()
def throttleWrites(self):
"""
Throttle writes on all protocols.
"""
log.msg("Throttling writes on %s" % self)
for p in self.protocols.keys():
p.throttleWrites()
def unthrottleWrites(self):
"""
Stop throttling writes on all protocols.
"""
self.unthrottleWritesID = None
log.msg("Stopped throttling writes on %s" % self)
for p in self.protocols.keys():
p.unthrottleWrites()
def buildProtocol(self, addr):
if self.connectionCount == 0:
if self.readLimit is not None:
self.checkReadBandwidth()
if self.writeLimit is not None:
self.checkWriteBandwidth()
if self.connectionCount < self.maxConnectionCount:
self.connectionCount += 1
return WrappingFactory.buildProtocol(self, addr)
else:
log.msg("Max connection count reached!")
return None
def unregisterProtocol(self, p):
WrappingFactory.unregisterProtocol(self, p)
self.connectionCount -= 1
if self.connectionCount == 0:
if self.unthrottleReadsID is not None:
self.unthrottleReadsID.cancel()
if self.checkReadBandwidthID is not None:
self.checkReadBandwidthID.cancel()
if self.unthrottleWritesID is not None:
self.unthrottleWritesID.cancel()
if self.checkWriteBandwidthID is not None:
self.checkWriteBandwidthID.cancel()
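# Hedged usage sketch (not in the original module): cap connections and
# bandwidth on an existing factory. The limits below are illustrative.
def _example_throttling_factory(wrappedFactory):
    # At most 100 concurrent connections, roughly 10 kB/s read and write.
    # Write throttling only takes effect once a producer is registered,
    # as the class docstring notes.
    return ThrottlingFactory(wrappedFactory, maxConnectionCount=100,
                             readLimit=10000, writeLimit=10000)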
class SpewingProtocol(ProtocolWrapper):
def dataReceived(self, data):
log.msg("Received: %r" % data)
ProtocolWrapper.dataReceived(self,data)
def write(self, data):
log.msg("Sending: %r" % data)
ProtocolWrapper.write(self,data)
class SpewingFactory(WrappingFactory):
protocol = SpewingProtocol
class LimitConnectionsByPeer(WrappingFactory):
maxConnectionsPerPeer = 5
def startFactory(self):
self.peerConnections = {}
def buildProtocol(self, addr):
peerHost = addr[0]
connectionCount = self.peerConnections.get(peerHost, 0)
if connectionCount >= self.maxConnectionsPerPeer:
return None
self.peerConnections[peerHost] = connectionCount + 1
return WrappingFactory.buildProtocol(self, addr)
def unregisterProtocol(self, p):
peerHost = p.getPeer()[1]
self.peerConnections[peerHost] -= 1
if self.peerConnections[peerHost] == 0:
del self.peerConnections[peerHost]
class LimitTotalConnectionsFactory(ServerFactory):
"""
Factory that limits the number of simultaneous connections.
@type connectionCount: C{int}
@ivar connectionCount: number of current connections.
@type connectionLimit: C{int} or C{None}
@cvar connectionLimit: maximum number of connections.
@type overflowProtocol: L{Protocol} or C{None}
@cvar overflowProtocol: Protocol to use for new connections when
connectionLimit is exceeded. If C{None} (the default value), excess
connections will be closed immediately.
"""
connectionCount = 0
connectionLimit = None
overflowProtocol = None
def buildProtocol(self, addr):
if (self.connectionLimit is None or
self.connectionCount < self.connectionLimit):
# Build the normal protocol
wrappedProtocol = self.protocol()
elif self.overflowProtocol is None:
# Just drop the connection
return None
else:
# Too many connections, so build the overflow protocol
wrappedProtocol = self.overflowProtocol()
wrappedProtocol.factory = self
protocol = ProtocolWrapper(self, wrappedProtocol)
self.connectionCount += 1
return protocol
def registerProtocol(self, p):
pass
def unregisterProtocol(self, p):
self.connectionCount -= 1
class TimeoutProtocol(ProtocolWrapper):
"""
Protocol that automatically disconnects when the connection is idle.
"""
def __init__(self, factory, wrappedProtocol, timeoutPeriod):
"""
Constructor.
@param factory: An L{IFactory}.
@param wrappedProtocol: A L{Protocol} to wrap.
@param timeoutPeriod: Number of seconds to wait for activity before
timing out.
"""
ProtocolWrapper.__init__(self, factory, wrappedProtocol)
self.timeoutCall = None
self.setTimeout(timeoutPeriod)
def setTimeout(self, timeoutPeriod=None):
"""
Set a timeout.
This will cancel any existing timeouts.
@param timeoutPeriod: If not C{None}, change the timeout period.
Otherwise, use the existing value.
"""
self.cancelTimeout()
if timeoutPeriod is not None:
self.timeoutPeriod = timeoutPeriod
self.timeoutCall = self.factory.callLater(self.timeoutPeriod, self.timeoutFunc)
def cancelTimeout(self):
"""
Cancel the timeout.
If the timeout was already cancelled, this does nothing.
"""
if self.timeoutCall:
try:
self.timeoutCall.cancel()
except error.AlreadyCalled:
pass
self.timeoutCall = None
def resetTimeout(self):
"""
Reset the timeout, usually because some activity just happened.
"""
if self.timeoutCall:
self.timeoutCall.reset(self.timeoutPeriod)
def write(self, data):
self.resetTimeout()
ProtocolWrapper.write(self, data)
def writeSequence(self, seq):
self.resetTimeout()
ProtocolWrapper.writeSequence(self, seq)
def dataReceived(self, data):
self.resetTimeout()
ProtocolWrapper.dataReceived(self, data)
def connectionLost(self, reason):
self.cancelTimeout()
ProtocolWrapper.connectionLost(self, reason)
def timeoutFunc(self):
"""
This method is called when the timeout is triggered.
By default it calls L{loseConnection}. Override this if you want
something else to happen.
"""
self.loseConnection()
class TimeoutFactory(WrappingFactory):
"""
Factory for TimeoutWrapper.
"""
protocol = TimeoutProtocol
def __init__(self, wrappedFactory, timeoutPeriod=30*60):
self.timeoutPeriod = timeoutPeriod
WrappingFactory.__init__(self, wrappedFactory)
def buildProtocol(self, addr):
return self.protocol(self, self.wrappedFactory.buildProtocol(addr),
timeoutPeriod=self.timeoutPeriod)
def callLater(self, period, func):
"""
Wrapper around L{reactor.callLater} for test purpose.
"""
from twisted.internet import reactor
return reactor.callLater(period, func)
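# Hedged usage sketch (not in the original module): give every connection of
# an existing factory a 15-minute idle timeout instead of the default 30.
def _example_timeout_factory(wrappedFactory):
    # Each TimeoutProtocol resets its timer on write/writeSequence/
    # dataReceived and closes the connection from timeoutFunc() on expiry.
    return TimeoutFactory(wrappedFactory, timeoutPeriod=15 * 60)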
class TrafficLoggingProtocol(ProtocolWrapper):
def __init__(self, factory, wrappedProtocol, logfile, lengthLimit=None,
number=0):
"""
@param factory: factory which created this protocol.
@type factory: C{protocol.Factory}.
@param wrappedProtocol: the underlying protocol.
@type wrappedProtocol: C{protocol.Protocol}.
@param logfile: file opened for writing used to write log messages.
@type logfile: C{file}
@param lengthLimit: maximum size of the received data to log.
@type lengthLimit: C{int}
@param number: identifier of the connection.
@type number: C{int}.
"""
ProtocolWrapper.__init__(self, factory, wrappedProtocol)
self.logfile = logfile
self.lengthLimit = lengthLimit
self._number = number
def _log(self, line):
self.logfile.write(line + '\n')
self.logfile.flush()
def _mungeData(self, data):
if self.lengthLimit and len(data) > self.lengthLimit:
data = data[:self.lengthLimit - 12] + '<... elided>'
return data
# IProtocol
def connectionMade(self):
self._log('*')
return ProtocolWrapper.connectionMade(self)
def dataReceived(self, data):
self._log('C %d: %r' % (self._number, self._mungeData(data)))
return ProtocolWrapper.dataReceived(self, data)
def connectionLost(self, reason):
self._log('C %d: %r' % (self._number, reason))
return ProtocolWrapper.connectionLost(self, reason)
# ITransport
def write(self, data):
self._log('S %d: %r' % (self._number, self._mungeData(data)))
return ProtocolWrapper.write(self, data)
def writeSequence(self, iovec):
self._log('SV %d: %r' % (self._number, [self._mungeData(d) for d in iovec]))
return ProtocolWrapper.writeSequence(self, iovec)
def loseConnection(self):
self._log('S %d: *' % (self._number,))
return ProtocolWrapper.loseConnection(self)
class TrafficLoggingFactory(WrappingFactory):
protocol = TrafficLoggingProtocol
_counter = 0
def __init__(self, wrappedFactory, logfilePrefix, lengthLimit=None):
self.logfilePrefix = logfilePrefix
self.lengthLimit = lengthLimit
WrappingFactory.__init__(self, wrappedFactory)
def open(self, name):
return file(name, 'w')
def buildProtocol(self, addr):
self._counter += 1
logfile = self.open(self.logfilePrefix + '-' + str(self._counter))
return self.protocol(self, self.wrappedFactory.buildProtocol(addr),
logfile, self.lengthLimit, self._counter)
def resetCounter(self):
"""
Reset the value of the counter used to identify connections.
"""
self._counter = 0
class TimeoutMixin:
"""
Mixin for protocols which wish to timeout connections.
Protocols that mix this in have a single timeout, set using L{setTimeout}.
When the timeout is hit, L{timeoutConnection} is called, which, by
default, closes the connection.
@cvar timeOut: The number of seconds after which to timeout the connection.
"""
timeOut = None
__timeoutCall = None
def callLater(self, period, func):
"""
Wrapper around L{reactor.callLater} for test purpose.
"""
from twisted.internet import reactor
return reactor.callLater(period, func)
def resetTimeout(self):
"""
Reset the timeout count down.
If the connection has already timed out, then do nothing. If the
timeout has been cancelled (probably using C{setTimeout(None)}), also
do nothing.
It's often a good idea to call this when the protocol has received
some meaningful input from the other end of the connection. "I've got
some data, they're still there, reset the timeout".
"""
if self.__timeoutCall is not None and self.timeOut is not None:
self.__timeoutCall.reset(self.timeOut)
def setTimeout(self, period):
"""
Change the timeout period
@type period: C{int} or C{NoneType}
@param period: The period, in seconds, to change the timeout to, or
C{None} to disable the timeout.
"""
prev = self.timeOut
self.timeOut = period
if self.__timeoutCall is not None:
if period is None:
self.__timeoutCall.cancel()
self.__timeoutCall = None
else:
self.__timeoutCall.reset(period)
elif period is not None:
self.__timeoutCall = self.callLater(period, self.__timedOut)
return prev
def __timedOut(self):
self.__timeoutCall = None
self.timeoutConnection()
def timeoutConnection(self):
"""
Called when the connection times out.
Override to define behavior other than dropping the connection.
"""
self.transport.loseConnection()
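# Hedged usage sketch (not in the original module): a protocol that mixes in
# TimeoutMixin, arms the timer on connect, and feeds it on activity.
def _example_timeout_mixin():
    class IdleProtocol(TimeoutMixin, Protocol):
        def connectionMade(self):
            self.setTimeout(60)           # drop after 60s of silence

        def dataReceived(self, data):
            self.resetTimeout()           # activity restarts the countdown

        def connectionLost(self, reason):
            self.setTimeout(None)         # disarm the pending call

    return IdleProtocol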
| bsd-3-clause |
messense/wechat-bot | plugins/oschina.py | 3 | 1103 | #coding=utf-8
import requests
from xml.etree import ElementTree
from tornado.util import ObjectDict
__name__ = 'oschina'
def test(data, msg=None, bot=None):
if ('oschina' in data or '开源中国' in data) and '最新' in data and '新闻' in data:
return True
return False
def respond(data, msg=None, bot=None):
headers = {
'Host' : 'www.oschina.net',
'Connection' : 'Keep-Alive',
'User-Agent' : 'OSChina.NET/1.7.4_1/Android/4.1/Nexus S/12345678'
}
res = requests.get("http://www.oschina.net/action/api/news_list",
headers=headers)
parser = ElementTree.fromstring(res.content)
news_list = parser.find('newslist')
articles = []
i = 0
for news in news_list.iter('news'):
if i > 9:
break
article = ObjectDict()
article.title = news.find('title').text
article.description = article.title
article.url = "http://www.oschina.net/news/%s" % news.find('id').text
article.picurl = ''
articles.append(article)
i += 1
return articles
| mit |
dendisuhubdy/tensorflow | tensorflow/python/data/kernel_tests/shard_dataset_op_test.py | 42 | 4218 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
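# Hedged reference sketch (not part of the original test file): the tests
# below are consistent with shard(n, index) keeping element i of its input
# iff i % n == index, as this pure-Python equivalent shows.
def _shard_reference(values, num_shards, index):
  return [v for i, v in enumerate(values) if i % num_shards == index]
# e.g. _shard_reference(range(10), 5, 2) == [2, 7], matching testSimpleCase.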
class ShardDatasetOpTest(test.TestCase):
def testSimpleCase(self):
dataset = dataset_ops.Dataset.range(10).shard(5, 2)
iterator = dataset.make_one_shot_iterator()
with self.test_session() as sess:
self.assertEqual(2, sess.run(iterator.get_next()))
self.assertEqual(7, sess.run(iterator.get_next()))
with self.assertRaises(errors.OutOfRangeError):
sess.run(iterator.get_next())
def testNestedData(self):
dataset_a = dataset_ops.Dataset.range(10)
dataset_b = dataset_ops.Dataset.range(10, 0, -1)
dataset = dataset_ops.Dataset.zip((dataset_a, dataset_b)).shard(5, 2)
iterator = dataset.make_one_shot_iterator()
with self.test_session() as sess:
self.assertEqual((2, 8), sess.run(iterator.get_next()))
self.assertEqual((7, 3), sess.run(iterator.get_next()))
with self.assertRaises(errors.OutOfRangeError):
sess.run(iterator.get_next())
def testOffsetZero(self):
dataset = dataset_ops.Dataset.range(10).shard(5, 0)
iterator = dataset.make_one_shot_iterator()
with self.test_session() as sess:
self.assertEqual(0, sess.run(iterator.get_next()))
self.assertEqual(5, sess.run(iterator.get_next()))
with self.assertRaises(errors.OutOfRangeError):
sess.run(iterator.get_next())
def testOffsetGreaterNumShards(self):
with self.assertRaises(ValueError):
dataset_ops.Dataset.range(10).shard(5, 7)
def testNegativeOffset(self):
with self.assertRaises(ValueError):
dataset_ops.Dataset.range(10).shard(5, -3)
def testNegativeNumShards(self):
with self.assertRaises(ValueError):
dataset_ops.Dataset.range(10).shard(-3, 1)
def testZeroNumShards(self):
with self.assertRaises(ValueError):
dataset_ops.Dataset.range(10).shard(0, 1)
def testIteratorEndsBeforeFirstElem(self):
dataset = dataset_ops.Dataset.range(1).shard(5, 2)
iterator = dataset.make_one_shot_iterator()
with self.test_session() as sess:
with self.assertRaises(errors.OutOfRangeError):
sess.run(iterator.get_next())
def testLargerWorkerPool(self):
dataset = dataset_ops.Dataset.range(10).shard(7, 5)
iterator = dataset.make_one_shot_iterator()
with self.test_session() as sess:
self.assertEqual(5, sess.run(iterator.get_next()))
with self.assertRaises(errors.OutOfRangeError):
sess.run(iterator.get_next())
def testIndexEqualsNumShards(self):
dataset = dataset_ops.Dataset.range(10).shard(5, 4)
iterator = dataset.make_one_shot_iterator()
with self.test_session() as sess:
self.assertEqual(4, sess.run(iterator.get_next()))
self.assertEqual(9, sess.run(iterator.get_next()))
with self.assertRaises(errors.OutOfRangeError):
sess.run(iterator.get_next())
def testIndexEqualsNumShards2(self):
dataset = dataset_ops.Dataset.range(10).shard(4, 3)
iterator = dataset.make_one_shot_iterator()
with self.test_session() as sess:
self.assertEqual(3, sess.run(iterator.get_next()))
self.assertEqual(7, sess.run(iterator.get_next()))
with self.assertRaises(errors.OutOfRangeError):
sess.run(iterator.get_next())
if __name__ == "__main__":
test.main()
| apache-2.0 |
adelton/django | tests/m2m_through_regress/tests.py | 182 | 9847 | from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.core import management
from django.test import TestCase
from django.utils.six import StringIO
from .models import (
Car, CarDriver, Driver, Group, Membership, Person, UserMembership,
)
class M2MThroughTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.bob = Person.objects.create(name="Bob")
cls.jim = Person.objects.create(name="Jim")
cls.rock = Group.objects.create(name="Rock")
cls.roll = Group.objects.create(name="Roll")
cls.frank = User.objects.create_user("frank", "frank@example.com", "password")
cls.jane = User.objects.create_user("jane", "jane@example.com", "password")
# normal intermediate model
cls.bob_rock = Membership.objects.create(person=cls.bob, group=cls.rock)
cls.bob_roll = Membership.objects.create(person=cls.bob, group=cls.roll, price=50)
cls.jim_rock = Membership.objects.create(person=cls.jim, group=cls.rock, price=50)
# intermediate model with custom id column
cls.frank_rock = UserMembership.objects.create(user=cls.frank, group=cls.rock)
cls.frank_roll = UserMembership.objects.create(user=cls.frank, group=cls.roll)
cls.jane_rock = UserMembership.objects.create(user=cls.jane, group=cls.rock)
def test_retrieve_reverse_m2m_items(self):
self.assertQuerysetEqual(
self.bob.group_set.all(), [
"<Group: Rock>",
"<Group: Roll>",
],
ordered=False
)
def test_retrieve_forward_m2m_items(self):
self.assertQuerysetEqual(
self.roll.members.all(), [
"<Person: Bob>",
]
)
def test_cannot_use_setattr_on_reverse_m2m_with_intermediary_model(self):
self.assertRaises(AttributeError, setattr, self.bob, "group_set", [])
def test_cannot_use_setattr_on_forward_m2m_with_intermediary_model(self):
self.assertRaises(AttributeError, setattr, self.roll, "members", [])
def test_cannot_use_create_on_m2m_with_intermediary_model(self):
self.assertRaises(AttributeError, self.rock.members.create, name="Anne")
def test_cannot_use_create_on_reverse_m2m_with_intermediary_model(self):
self.assertRaises(AttributeError, self.bob.group_set.create, name="Funk")
def test_retrieve_reverse_m2m_items_via_custom_id_intermediary(self):
self.assertQuerysetEqual(
self.frank.group_set.all(), [
"<Group: Rock>",
"<Group: Roll>",
],
ordered=False
)
def test_retrieve_forward_m2m_items_via_custom_id_intermediary(self):
self.assertQuerysetEqual(
self.roll.user_members.all(), [
"<User: frank>",
]
)
def test_join_trimming_forwards(self):
"Check that we don't involve too many copies of the intermediate table when doing a join. Refs #8046, #8254"
self.assertQuerysetEqual(
self.rock.members.filter(membership__price=50), [
"<Person: Jim>",
]
)
def test_join_trimming_reverse(self):
self.assertQuerysetEqual(
self.bob.group_set.filter(membership__price=50), [
"<Group: Roll>",
]
)
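# Hedged illustration (not part of the original tests): with an explicit
# ``through`` model, rows are linked by creating the intermediate object
# directly, since add()/create()/assignment raise here (older Django).
def _link_person_to_group(person, group, price=100):
    return Membership.objects.create(person=person, group=group, price=price)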
class M2MThroughSerializationTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.bob = Person.objects.create(name="Bob")
cls.roll = Group.objects.create(name="Roll")
cls.bob_roll = Membership.objects.create(person=cls.bob, group=cls.roll)
def test_serialization(self):
"m2m-through models aren't serialized as m2m fields. Refs #8134"
pks = {"p_pk": self.bob.pk, "g_pk": self.roll.pk, "m_pk": self.bob_roll.pk}
out = StringIO()
management.call_command("dumpdata", "m2m_through_regress", format="json", stdout=out)
self.assertJSONEqual(out.getvalue().strip(), """[{"pk": %(m_pk)s, "model": "m2m_through_regress.membership", "fields": {"person": %(p_pk)s, "price": 100, "group": %(g_pk)s}}, {"pk": %(p_pk)s, "model": "m2m_through_regress.person", "fields": {"name": "Bob"}}, {"pk": %(g_pk)s, "model": "m2m_through_regress.group", "fields": {"name": "Roll"}}]""" % pks)
out = StringIO()
management.call_command("dumpdata", "m2m_through_regress", format="xml",
indent=2, stdout=out)
self.assertXMLEqual(out.getvalue().strip(), """
<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object pk="%(m_pk)s" model="m2m_through_regress.membership">
<field to="m2m_through_regress.person" name="person" rel="ManyToOneRel">%(p_pk)s</field>
<field to="m2m_through_regress.group" name="group" rel="ManyToOneRel">%(g_pk)s</field>
<field type="IntegerField" name="price">100</field>
</object>
<object pk="%(p_pk)s" model="m2m_through_regress.person">
<field type="CharField" name="name">Bob</field>
</object>
<object pk="%(g_pk)s" model="m2m_through_regress.group">
<field type="CharField" name="name">Roll</field>
</object>
</django-objects>
""".strip() % pks)
class ToFieldThroughTests(TestCase):
def setUp(self):
self.car = Car.objects.create(make="Toyota")
self.driver = Driver.objects.create(name="Ryan Briscoe")
CarDriver.objects.create(car=self.car, driver=self.driver)
# We are testing if wrong objects get deleted due to using wrong
# field value in m2m queries. So, it is essential that the pk
# numberings do not match.
# Create one intentionally unused driver to mix up the autonumbering
self.unused_driver = Driver.objects.create(name="Barney Gumble")
# And two intentionally unused cars.
self.unused_car1 = Car.objects.create(make="Trabant")
self.unused_car2 = Car.objects.create(make="Wartburg")
def test_to_field(self):
self.assertQuerysetEqual(
self.car.drivers.all(),
["<Driver: Ryan Briscoe>"]
)
def test_to_field_reverse(self):
self.assertQuerysetEqual(
self.driver.car_set.all(),
["<Car: Toyota>"]
)
def test_to_field_clear_reverse(self):
self.driver.car_set.clear()
self.assertQuerysetEqual(
self.driver.car_set.all(), [])
def test_to_field_clear(self):
self.car.drivers.clear()
self.assertQuerysetEqual(
self.car.drivers.all(), [])
# Low level tests for _add_items and _remove_items. We test these methods
# because .add/.remove aren't available for m2m fields with through, but
# through is the only way to set to_field currently. We do want to make
# sure these methods are ready if the ability to use .add or .remove with
# to_field relations is added some day.
def test_add(self):
self.assertQuerysetEqual(
self.car.drivers.all(),
["<Driver: Ryan Briscoe>"]
)
# Yikes - barney is going to drive...
self.car.drivers._add_items('car', 'driver', self.unused_driver)
self.assertQuerysetEqual(
self.car.drivers.all(),
["<Driver: Barney Gumble>", "<Driver: Ryan Briscoe>"]
)
def test_add_null(self):
nullcar = Car.objects.create(make=None)
with self.assertRaises(ValueError):
nullcar.drivers._add_items('car', 'driver', self.unused_driver)
def test_add_related_null(self):
nulldriver = Driver.objects.create(name=None)
with self.assertRaises(ValueError):
self.car.drivers._add_items('car', 'driver', nulldriver)
def test_add_reverse(self):
car2 = Car.objects.create(make="Honda")
self.assertQuerysetEqual(
self.driver.car_set.all(),
["<Car: Toyota>"]
)
self.driver.car_set._add_items('driver', 'car', car2)
self.assertQuerysetEqual(
self.driver.car_set.all(),
["<Car: Toyota>", "<Car: Honda>"],
ordered=False
)
def test_add_null_reverse(self):
nullcar = Car.objects.create(make=None)
with self.assertRaises(ValueError):
self.driver.car_set._add_items('driver', 'car', nullcar)
def test_add_null_reverse_related(self):
nulldriver = Driver.objects.create(name=None)
with self.assertRaises(ValueError):
nulldriver.car_set._add_items('driver', 'car', self.car)
def test_remove(self):
self.assertQuerysetEqual(
self.car.drivers.all(),
["<Driver: Ryan Briscoe>"]
)
self.car.drivers._remove_items('car', 'driver', self.driver)
self.assertQuerysetEqual(
self.car.drivers.all(), [])
def test_remove_reverse(self):
self.assertQuerysetEqual(
self.driver.car_set.all(),
["<Car: Toyota>"]
)
self.driver.car_set._remove_items('driver', 'car', self.car)
self.assertQuerysetEqual(
self.driver.car_set.all(), [])
class ThroughLoadDataTestCase(TestCase):
fixtures = ["m2m_through"]
def test_sequence_creation(self):
"Check that sequences on an m2m_through are created for the through model, not a phantom auto-generated m2m table. Refs #11107"
out = StringIO()
management.call_command("dumpdata", "m2m_through_regress", format="json", stdout=out)
self.assertJSONEqual(out.getvalue().strip(), """[{"pk": 1, "model": "m2m_through_regress.usermembership", "fields": {"price": 100, "group": 1, "user": 1}}, {"pk": 1, "model": "m2m_through_regress.person", "fields": {"name": "Guido"}}, {"pk": 1, "model": "m2m_through_regress.group", "fields": {"name": "Python Core Group"}}]""")
| bsd-3-clause |
jorapi/android_kernel_lge_g3 | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
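# Quick self-check sketch (not in the original script): timestamps are in
# nanoseconds, so a 1,500,000ns gap converts to 1.5msec.
assert diff_msec(1000000, 2500000) == 1.5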
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be showed
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, rc, dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
    # merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
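# Illustrative TX lifecycle implied by the handlers in this script (a
# summary comment, not part of the original):
#   net_dev_queue             -> skb enters tx_queue_list (queue_t recorded)
#   net_dev_xmit with rc == 0 -> skb moves to tx_xmit_list (xmit_t recorded)
#   kfree_skb / consume_skb   -> skb moves to tx_free_list (free_t recorded)
# print_transmit() then reports the queue->xmit->free latencies per skb.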
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
| gpl-2.0 |
jaggu303619/asylum-v2.0 | openerp/addons/base_status/__init__.py | 61 | 1087 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-today OpenERP SA (<http://openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base_state
import base_stage
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
rfk/regobj | setup.py | 1 | 1664 | #
# This is the regobj setuptools script.
# Originally developed by Ryan Kelly, 2009.
#
# This script is placed in the public domain.
#
from distutils.core import setup
# Safely extract the docstring and version info from the module.
# If we did a straight `import regobj` here we wouldn't be able
# to build on non-win32 machines.
try:
next = next
except NameError:
def next(i):
return i.next()
regobj = {}
try:
src = open("regobj.py")
try:
lines = []
ln = next(src)
while "__version__" not in ln:
lines.append(ln)
ln = next(src)
while "__ver" in ln:
lines.append(ln)
ln = next(src)
exec("".join(lines),regobj)
finally:
src.close()
except IOError:
pass
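# Illustrative outcome (hypothetical module contents): if regobj.py began
# with
#   """Pythonic access to the Windows Registry."""
#   __version__ = "1.0.0"
# the exec above would leave regobj['__doc__'] and regobj['__version__']
# populated without ever importing the win32-only module itself.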
VERSION = regobj["__version__"]
NAME = "regobj"
DESCRIPTION = "Pythonic object-based access to the Windows Registry."
LONG_DESC = regobj["__doc__"]
AUTHOR = "Ryan Kelly"
AUTHOR_EMAIL = "ryan@rfk.id.au"
URL="https://github.com/rfk/regobj"
LICENSE = "MIT"
KEYWORDS = "windows registry"
setup(name=NAME,
version=VERSION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
description=DESCRIPTION,
long_description=LONG_DESC,
license=LICENSE,
keywords=KEYWORDS,
py_modules=["regobj"],
classifiers=[c.strip() for c in """
Intended Audience :: Developers
License :: OSI Approved :: MIT License
Programming Language :: Python :: 2
Programming Language :: Python :: 3
Topic :: Software Development :: Libraries :: Python Modules
""".split('\n') if c.strip()],
)
| mit |
ibazzi/xbmc | addons/script.module.html5lib/lib/html5lib/filters/optionaltags.py | 1727 | 10500 | from __future__ import absolute_import, division, unicode_literals
from . import _base
class Filter(_base.Filter):
def slider(self):
previous1 = previous2 = None
for token in self.source:
if previous1 is not None:
yield previous2, previous1, token
previous2 = previous1
previous1 = token
yield previous2, previous1, None
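    # Illustrative behaviour of slider() (not part of the original
    # filter): for a token stream [t1, t2, t3] it yields
    #   (None, t1, t2), (t1, t2, t3), (t2, t3, None)
    # i.e. each token paired with its immediate neighbours.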
def __iter__(self):
for previous, token, next in self.slider():
type = token["type"]
if type == "StartTag":
if (token["data"] or
not self.is_optional_start(token["name"], previous, next)):
yield token
elif type == "EndTag":
if not self.is_optional_end(token["name"], next):
yield token
else:
yield token
def is_optional_start(self, tagname, previous, next):
type = next and next["type"] or None
        if tagname == 'html':
# An html element's start tag may be omitted if the first thing
# inside the html element is not a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname == 'head':
# A head element's start tag may be omitted if the first thing
# inside the head element is an element.
# XXX: we also omit the start tag if the head element is empty
if type in ("StartTag", "EmptyTag"):
return True
elif type == "EndTag":
return next["name"] == "head"
elif tagname == 'body':
# A body element's start tag may be omitted if the first thing
# inside the body element is not a space character or a comment,
# except if the first thing inside the body element is a script
# or style element and the node immediately preceding the body
# element is a head element whose end tag has been omitted.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we do not look at the preceding event, so we never omit
# the body element's start tag if it's followed by a script or
# a style element.
return next["name"] not in ('script', 'style')
else:
return True
elif tagname == 'colgroup':
# A colgroup element's start tag may be omitted if the first thing
# inside the colgroup element is a col element, and if the element
            # is not immediately preceded by another colgroup element whose
# end tag has been omitted.
if type in ("StartTag", "EmptyTag"):
# XXX: we do not look at the preceding event, so instead we never
# omit the colgroup element's end tag when it is immediately
# followed by another colgroup element. See is_optional_end.
return next["name"] == "col"
else:
return False
elif tagname == 'tbody':
# A tbody element's start tag may be omitted if the first thing
# inside the tbody element is a tr element, and if the element is
            # not immediately preceded by a tbody, thead, or tfoot element
# whose end tag has been omitted.
if type == "StartTag":
# omit the thead and tfoot elements' end tag when they are
# immediately followed by a tbody element. See is_optional_end.
if previous and previous['type'] == 'EndTag' and \
previous['name'] in ('tbody', 'thead', 'tfoot'):
return False
return next["name"] == 'tr'
else:
return False
return False
def is_optional_end(self, tagname, next):
type = next and next["type"] or None
if tagname in ('html', 'head', 'body'):
# An html element's end tag may be omitted if the html element
# is not immediately followed by a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname in ('li', 'optgroup', 'tr'):
# A li element's end tag may be omitted if the li element is
# immediately followed by another li element or if there is
# no more content in the parent element.
# An optgroup element's end tag may be omitted if the optgroup
# element is immediately followed by another optgroup element,
# or if there is no more content in the parent element.
# A tr element's end tag may be omitted if the tr element is
# immediately followed by another tr element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] == tagname
else:
return type == "EndTag" or type is None
elif tagname in ('dt', 'dd'):
# A dt element's end tag may be omitted if the dt element is
# immediately followed by another dt element or a dd element.
# A dd element's end tag may be omitted if the dd element is
# immediately followed by another dd element or a dt element,
# or if there is no more content in the parent element.
if type == "StartTag":
return next["name"] in ('dt', 'dd')
elif tagname == 'dd':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'p':
# A p element's end tag may be omitted if the p element is
# immediately followed by an address, article, aside,
# blockquote, datagrid, dialog, dir, div, dl, fieldset,
# footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu,
# nav, ol, p, pre, section, table, or ul, element, or if
# there is no more content in the parent element.
if type in ("StartTag", "EmptyTag"):
return next["name"] in ('address', 'article', 'aside',
'blockquote', 'datagrid', 'dialog',
'dir', 'div', 'dl', 'fieldset', 'footer',
'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'header', 'hr', 'menu', 'nav', 'ol',
'p', 'pre', 'section', 'table', 'ul')
else:
return type == "EndTag" or type is None
elif tagname == 'option':
# An option element's end tag may be omitted if the option
# element is immediately followed by another option element,
# or if it is immediately followed by an <code>optgroup</code>
# element, or if there is no more content in the parent
# element.
if type == "StartTag":
return next["name"] in ('option', 'optgroup')
else:
return type == "EndTag" or type is None
elif tagname in ('rt', 'rp'):
# An rt element's end tag may be omitted if the rt element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
# An rp element's end tag may be omitted if the rp element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('rt', 'rp')
else:
return type == "EndTag" or type is None
elif tagname == 'colgroup':
# A colgroup element's end tag may be omitted if the colgroup
# element is not immediately followed by a space character or
# a comment.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we also look for an immediately following colgroup
# element. See is_optional_start.
return next["name"] != 'colgroup'
else:
return True
elif tagname in ('thead', 'tbody'):
# A thead element's end tag may be omitted if the thead element
# is immediately followed by a tbody or tfoot element.
# A tbody element's end tag may be omitted if the tbody element
# is immediately followed by a tbody or tfoot element, or if
# there is no more content in the parent element.
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] in ['tbody', 'tfoot']
elif tagname == 'tbody':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'tfoot':
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] == 'tbody'
else:
return type == "EndTag" or type is None
elif tagname in ('td', 'th'):
# A td element's end tag may be omitted if the td element is
# immediately followed by a td or th element, or if there is
# no more content in the parent element.
# A th element's end tag may be omitted if the th element is
# immediately followed by a td or th element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('td', 'th')
else:
return type == "EndTag" or type is None
return False
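    # Illustrative end-to-end effect (assumed usage through html5lib's
    # serializer, which is not shown in this file): with this filter
    # applied, a tree for
    #   <html><head></head><body><p>a</p><p>b</p></body></html>
    # can serialize as just '<p>a<p>b', because the html/head/body tags
    # and the first </p> are all omissible under the rules above.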
| gpl-2.0 |
tntC4stl3/scrapy | tests/test_downloadermiddleware_httpproxy.py | 103 | 3439 | import os
import sys
from twisted.trial.unittest import TestCase, SkipTest
from scrapy.downloadermiddlewares.httpproxy import HttpProxyMiddleware
from scrapy.exceptions import NotConfigured
from scrapy.http import Response, Request
from scrapy.spiders import Spider
spider = Spider('foo')
class TestHttpProxyMiddleware(TestCase):
failureException = AssertionError
def setUp(self):
self._oldenv = os.environ.copy()
def tearDown(self):
os.environ = self._oldenv
def test_no_proxies(self):
os.environ = {}
self.assertRaises(NotConfigured, HttpProxyMiddleware)
    def test_no_environment_proxies(self):
os.environ = {'dummy_proxy': 'reset_env_and_do_not_raise'}
mw = HttpProxyMiddleware()
for url in ('http://e.com', 'https://e.com', 'file:///tmp/a'):
req = Request(url)
assert mw.process_request(req, spider) is None
self.assertEquals(req.url, url)
self.assertEquals(req.meta, {})
    def test_environment_proxies(self):
os.environ['http_proxy'] = http_proxy = 'https://proxy.for.http:3128'
os.environ['https_proxy'] = https_proxy = 'http://proxy.for.https:8080'
os.environ.pop('file_proxy', None)
mw = HttpProxyMiddleware()
for url, proxy in [('http://e.com', http_proxy),
('https://e.com', https_proxy), ('file://tmp/a', None)]:
req = Request(url)
assert mw.process_request(req, spider) is None
self.assertEquals(req.url, url)
self.assertEquals(req.meta.get('proxy'), proxy)
def test_proxy_auth(self):
os.environ['http_proxy'] = 'https://user:pass@proxy:3128'
mw = HttpProxyMiddleware()
req = Request('http://scrapytest.org')
assert mw.process_request(req, spider) is None
self.assertEquals(req.meta, {'proxy': 'https://proxy:3128'})
self.assertEquals(req.headers.get('Proxy-Authorization'), 'Basic dXNlcjpwYXNz')
def test_proxy_auth_empty_passwd(self):
os.environ['http_proxy'] = 'https://user:@proxy:3128'
mw = HttpProxyMiddleware()
req = Request('http://scrapytest.org')
assert mw.process_request(req, spider) is None
self.assertEquals(req.meta, {'proxy': 'https://proxy:3128'})
self.assertEquals(req.headers.get('Proxy-Authorization'), 'Basic dXNlcjo=')
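    # Note (illustrative): the Proxy-Authorization values asserted above
    # are plain base64-encoded credentials, e.g.
    #   base64.b64encode(b'user:pass') == b'dXNlcjpwYXNz'
    #   base64.b64encode(b'user:')     == b'dXNlcjo='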
    def test_proxy_already_set(self):
os.environ['http_proxy'] = http_proxy = 'https://proxy.for.http:3128'
mw = HttpProxyMiddleware()
req = Request('http://noproxy.com', meta={'proxy': None})
assert mw.process_request(req, spider) is None
assert 'proxy' in req.meta and req.meta['proxy'] is None
def test_no_proxy(self):
os.environ['http_proxy'] = http_proxy = 'https://proxy.for.http:3128'
mw = HttpProxyMiddleware()
os.environ['no_proxy'] = '*'
req = Request('http://noproxy.com')
assert mw.process_request(req, spider) is None
assert 'proxy' not in req.meta
os.environ['no_proxy'] = 'other.com'
req = Request('http://noproxy.com')
assert mw.process_request(req, spider) is None
assert 'proxy' in req.meta
os.environ['no_proxy'] = 'other.com,noproxy.com'
req = Request('http://noproxy.com')
assert mw.process_request(req, spider) is None
assert 'proxy' not in req.meta
| bsd-3-clause |
fcurella/python-packager | pypackager/utils.py | 1 | 1184 | import collections
def unflatten_dict(a_dict):
resultDict = {}
for key, value in a_dict.items():
parts = key.split("-")
d = resultDict
for part in parts[:-1]:
if part not in d:
d[part] = {}
d = d[part]
d[parts[-1]] = value
return resultDict
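# Example (illustrative): keys are split on '-' and nested, so
#   unflatten_dict({'a-b': 1, 'a-c': 2, 'd': 3})
# returns {'a': {'b': 1, 'c': 2}, 'd': 3}.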
def clean_dict(a_dict):
ret = {}
for k, v in a_dict.items():
if v is None:
continue
if isinstance(v, dict):
ret[k] = clean_dict(v).copy()
continue
ret[k] = v
for k, v in ret.copy().items():
if v == {}:
del ret[k]
return ret
def recursive_update(d, u):
for k, v in u.items():
if isinstance(v, collections.Mapping):
r = recursive_update(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
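# Example (illustrative): nested mappings are merged rather than replaced,
# so recursive_update({'a': {'b': 1}}, {'a': {'c': 2}})
# returns {'a': {'b': 1, 'c': 2}}, unlike plain dict.update().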
def import_classpath(class_path):
module_path, class_name = class_path.rsplit('.', 1)
module = __import__(module_path, fromlist=[class_name])
return getattr(module, class_name)
def instantiate_classpath(class_path, *args, **kwargs):
return import_classpath(class_path)(*args, **kwargs)
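# Example (illustrative, assuming the dotted path names a real class):
#   instantiate_classpath('collections.OrderedDict', [('a', 1)])
# imports OrderedDict from collections and calls it with the given
# arguments.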
| mit |
Affix/CouchPotatoServer | libs/tornado/platform/posix.py | 352 | 1859 | #!/usr/bin/env python
#
# Copyright 2011 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Posix implementations of platform-specific functionality."""
from __future__ import absolute_import, division, print_function, with_statement
import fcntl
import os
from tornado.platform import interface
def set_close_exec(fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
def _set_nonblocking(fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
class Waker(interface.Waker):
def __init__(self):
r, w = os.pipe()
_set_nonblocking(r)
_set_nonblocking(w)
set_close_exec(r)
set_close_exec(w)
self.reader = os.fdopen(r, "rb", 0)
self.writer = os.fdopen(w, "wb", 0)
def fileno(self):
return self.reader.fileno()
def write_fileno(self):
return self.writer.fileno()
def wake(self):
try:
self.writer.write(b"x")
except IOError:
pass
def consume(self):
try:
while True:
result = self.reader.read()
if not result:
break
except IOError:
pass
def close(self):
self.reader.close()
self.writer.close()
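# Minimal usage sketch (assumed, not part of this module): Waker
# implements the classic self-pipe trick.
#
#   import select
#   waker = Waker()
#   # thread A blocks until a fd is ready or another thread wakes it:
#   select.select([waker.fileno()], [], [])
#   waker.consume()   # drain the pipe so the next select() blocks again
#   # meanwhile thread B, from anywhere:
#   waker.wake()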
| gpl-3.0 |
naototty/pyflag | src/plugins_old/MemoryForensics/Volatility-1.3_Linux_rc.1/forensics/win32/executable.py | 7 | 12379 | # Volatility
# Copyright (C) 2007,2008 Volatile Systems
# Copyright (c) 2008 Brendan Dolan-Gavitt <bdolangavitt@wesleyan.edu>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""
@author: Brendan Dolan-Gavitt and AAron Walters
@license: GNU General Public License 2.0 or later
@contact: bdolangavitt@wesleyan.edu,awalters@volatilesystems.com
@organization: Volatile Systems LLC
"""
from forensics.object import *
import struct
def round_up(addr, align):
if addr % align == 0: return addr
else: return (addr + (align - (addr % align)))
def round_down(addr, align):
if addr % align == 0: return addr
else: return (addr - (addr % align))
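# Example (illustrative): aligning to a 0x1000-byte page boundary,
#   round_up(0x1234, 0x1000)   -> 0x2000
#   round_down(0x1234, 0x1000) -> 0x1000
#   round_up(0x2000, 0x1000)   -> 0x2000  (already aligned)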
def write_value(of, value_type, addr, data):
pack_str = builtin_types[value_type][1]
packed_data = struct.pack('='+pack_str, data)
of.seek(addr)
of.write(packed_data)
def write_obj(of, types, field, addr, data):
off, tp = get_obj_offset(types, field)
write_value(of, tp, addr+off, data)
def read_section(addr_space, sect, img_base, size):
section_start = img_base + sect['VirtualAddress']
return addr_space.zread(section_start, sect['SizeOfRawData'])
def write_section_header(of, types, orig_header, sect, addr):
# Write original header
of.seek(addr)
of.write(orig_header)
# Change some values
for f in sect:
data = sect[f]
f = f.split("_")
write_obj(of, types, ['_IMAGE_SECTION_HEADER'] + f, addr, data)
def get_sections_start(addr_space, types, header):
nt_header = header + read_obj(addr_space, types,
["_IMAGE_DOS_HEADER", "e_lfanew"], header)
optional_header_start,_ = get_obj_offset(types,
['_IMAGE_NT_HEADERS', 'OptionalHeader'])
optional_header_size = read_obj(addr_space, types,
['_IMAGE_NT_HEADERS', 'FileHeader', 'SizeOfOptionalHeader'],
nt_header)
sections_start = nt_header + optional_header_start + optional_header_size
return sections_start - header
def section_list(addr_space, types, header):
nt_header = header + read_obj(addr_space, types,
["_IMAGE_DOS_HEADER", "e_lfanew"], header)
num_sections = read_obj(addr_space, types,
["_IMAGE_NT_HEADERS", "FileHeader", "NumberOfSections"],
nt_header)
sections_start = get_sections_start(addr_space, types, header)
section_header_size = obj_size(types, '_IMAGE_SECTION_HEADER')
return [ header + sections_start + (i*section_header_size)
for i in range(num_sections) ]
def sanity_check_section(sect, image_size):
# Note: all addresses here are RVAs
if sect['VirtualAddress'] > image_size:
raise ValueError('VirtualAddress %08x is past the end of image.' %
sect['VirtualAddress'])
if sect['Misc_VirtualSize'] > image_size:
raise ValueError('VirtualSize %08x is larger than image size.' %
sect['Misc_VirtualSize'])
if sect['SizeOfRawData'] > image_size:
raise ValueError('SizeOfRawData %08x is larger than image size.' %
sect['SizeOfRawData'])
def get_file_align(addr_space, types, addr):
nt_header = addr + read_obj(addr_space, types,
["_IMAGE_DOS_HEADER", "e_lfanew"], addr)
file_align = read_obj(addr_space, types,
["_IMAGE_NT_HEADERS", "OptionalHeader", "FileAlignment"],
nt_header)
return file_align
def get_sect_align(addr_space, types, addr):
nt_header = addr + read_obj(addr_space, types,
["_IMAGE_DOS_HEADER", "e_lfanew"], addr)
sect_align = read_obj(addr_space, types,
["_IMAGE_NT_HEADERS", "OptionalHeader", "SectionAlignment"],
nt_header)
return sect_align
def get_size_of_image(addr_space, types, addr):
nt_header = addr + read_obj(addr_space, types,
["_IMAGE_DOS_HEADER", "e_lfanew"], addr)
size = read_obj(addr_space, types,
["_IMAGE_NT_HEADERS", "OptionalHeader", "SizeOfImage"],
nt_header)
return size
def get_size_of_headers(addr_space, types, addr):
nt_header = addr + read_obj(addr_space, types,
["_IMAGE_DOS_HEADER", "e_lfanew"], addr)
size = read_obj(addr_space, types,
["_IMAGE_NT_HEADERS", "OptionalHeader", "SizeOfHeaders"],
nt_header)
return size
def section_entry(addr_space, types, sect_addr):
fields = [ ['VirtualAddress'], ['Misc', 'VirtualSize'],
['SizeOfRawData'], ['PointerToRawData'] ]
sect = {}
(name_off,_) = get_obj_offset(types, ['_IMAGE_SECTION_HEADER',
'Name'])
name_len = 8
sect['Name'] = addr_space.zread(sect_addr + name_off, name_len)
for f in fields:
val = read_obj(addr_space, types,
['_IMAGE_SECTION_HEADER'] + f, sect_addr)
sect["_".join(f)] = val
return sect
def audit_read_write(addr_space,types,data_start,data_size,ofile):
first_block = 0x1000 - data_start % 0x1000
full_blocks = ((data_size + (data_start % 0x1000)) / 0x1000) - 1
left_over = (data_size + data_start) % 0x1000
paddr = addr_space.vtop(data_start)
# Deal with reads that are smaller than a block
if data_size < first_block:
data_read = addr_space.zread(data_start,data_size)
        if paddr is None:
            print "Memory Not Accessible: Virtual Address: 0x%x File Offset: 0x%x Size: 0x%x" % (data_start, ofile.tell(), data_size)
ofile.write(data_read)
return
data_read = addr_space.zread(data_start,first_block)
    if paddr is None:
        print "Memory Not Accessible: Virtual Address: 0x%x File Offset: 0x%x Size: 0x%x" % (data_start, ofile.tell(), first_block)
ofile.write(data_read)
# The middle part of the read
new_vaddr = data_start + first_block
for i in range(0,full_blocks):
data_read = addr_space.zread(new_vaddr, 0x1000)
        if addr_space.vtop(new_vaddr) is None:
            print "Memory Not Accessible: Virtual Address: 0x%x File Offset: 0x%x Size: 0x%x" % (new_vaddr, ofile.tell(), 0x1000)
ofile.write(data_read)
new_vaddr = new_vaddr + 0x1000
# The last part of the read
if left_over > 0:
data_read = addr_space.zread(new_vaddr, left_over)
        if addr_space.vtop(new_vaddr) is None:
            print "Memory Not Accessible: Virtual Address: 0x%x File Offset: 0x%x Size: 0x%x" % (new_vaddr, ofile.tell(), left_over)
ofile.write(data_read)
return
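# Worked example (illustrative) of the split above: for
# data_start = 0x1ff0 and data_size = 0x2020,
#   first_block = 0x1000 - 0x1ff0 % 0x1000                  = 0x10
#   full_blocks = ((0x2020 + 0x1ff0 % 0x1000) / 0x1000) - 1 = 2
#   left_over   = (0x2020 + 0x1ff0) % 0x1000                = 0x10
# so the function writes 0x10 + 2*0x1000 + 0x10 = 0x2020 bytes in total.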
def rebuild_exe_dsk(addr_space, types, addr, of, safe=True):
file_align = get_file_align(addr_space, types, addr)
header_size = get_size_of_headers(addr_space, types, addr)
img_size = get_size_of_image(addr_space, types, addr)
header = addr_space.zread(addr, header_size)
of.seek(0)
of.write(header)
sections = section_list(addr_space, types, addr)
for s_addr in sections:
sect = section_entry(addr_space, types, s_addr)
if safe:
sanity_check_section(sect, img_size)
section_start = addr + sect['VirtualAddress']
file_offset_align = round_down(sect['PointerToRawData'], file_align)
        if file_offset_align != sect['PointerToRawData']:
            print "Warning: section start on disk not aligned to file alignment."
            print "Warning: adjusted section start from %x to %x." % (sect['PointerToRawData'], file_offset_align)
of.seek(file_offset_align)
audit_read_write(addr_space, types,
section_start,sect['SizeOfRawData'],of)
# ***********************************************************************
# * OLD -- Do not use! Has many problems: *
# * 1. Assumes header is no more than 0x1000 bytes *
# * 2. Reads section by section, so data in slack space may be missed. *
# ***********************************************************************
#def rebuild_exe_mem(addr_space, types, addr, of):
# header = addr_space.read(addr, 0x1000)
#
# of.seek(0)
# of.write(header)
#
# file_align = get_file_align(addr_space, types, addr)
# sections = section_list(addr_space, types, addr)
# section_header_size = obj_size(types, '_IMAGE_SECTION_HEADER')
# orig_sections = [section_entry(addr_space, types, s) for s in sections ]
#
# # Write out whole image (base through base+sizeofimage)
# # Loop over sections, set PointerToRawData = VirtualAddress and
# # SizeOfRawData = max(next_section, virtual_size)
#
# # Calculate new file positions for memory sections
# sections_offset = min(s['PointerToRawData'] for s in orig_sections)
# modified_sections = []
# for i in range(len(orig_sections)):
# new_sect = {}
# new_sect['PointerToRawData'] = sections_offset
#
# modified_sections.append(new_sect)
#
# # Adjust the size of the section so it goes all
# # the way to the beginning of the next section.
# # If we're on the last section, make the section
# # go until the end of the image.
# try:
# size = (orig_sections[i+1]['VirtualAddress'] -
# orig_sections[i]['VirtualAddress'])
# except IndexError:
# size = round_up(orig_sections[i]['Misc_VirtualSize'], file_align)
#
# new_sect['SizeOfRawData'] = size
#
# sections_offset += size
#
# # Write modified section headers
# sections_start = get_sections_start(addr_space, types, addr)
# for i in range(len(sections)):
# orig_header = addr_space.read(sections[i], section_header_size)
# write_section_header(of, types, orig_header, modified_sections[i],
# sections_start + (i*section_header_size))
#
# # Write out sections to disk at the calculated positions,
# # using their virtual size in memory
# for orig_sect,mod_sect in zip(orig_sections, modified_sections):
# of.seek(mod_sect['PointerToRawData'])
# section_start = addr + orig_sect['VirtualAddress']
# sect_data = addr_space.read(section_start, orig_sect['Misc_VirtualSize'])
# of.write(sect_data)
def rebuild_exe_mem(addr_space, types, addr, of, safe=True):
sect_align = get_sect_align(addr_space, types, addr)
img_size = get_size_of_image(addr_space, types, addr)
sections = section_list(addr_space, types, addr)
section_header_size = obj_size(types, '_IMAGE_SECTION_HEADER')
orig_sections = [section_entry(addr_space, types, s) for s in sections ]
if safe:
for sect in orig_sections:
sanity_check_section(sect, img_size)
of.seek(0)
audit_read_write(addr_space, types,
addr,img_size,of)
modified_sections = []
for i in range(len(orig_sections)):
new_sect = {}
new_sect['PointerToRawData'] = orig_sections[i]['VirtualAddress']
# Each section should end where the next section starts.
# For the last section, use the in-memory size.
try:
size = (orig_sections[i+1]['VirtualAddress'] -
orig_sections[i]['VirtualAddress'])
except IndexError:
size = round_up(orig_sections[i]['Misc_VirtualSize'], sect_align)
new_sect['SizeOfRawData'] = size
new_sect['Misc_VirtualSize'] = size
modified_sections.append(new_sect)
# Write modified section headers
sections_start = get_sections_start(addr_space, types, addr)
for i in range(len(sections)):
orig_header = addr_space.zread(sections[i], section_header_size)
write_section_header(of, types, orig_header, modified_sections[i],
sections_start + (i*section_header_size))
| gpl-2.0 |
leilihh/nova | nova/tests/network/test_neutronv2.py | 2 | 107698 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import copy
import uuid
import mock
import mox
from neutronclient.common import exceptions
from neutronclient.v2_0 import client
from oslo.config import cfg
import six
from nova.compute import flavors
from nova.conductor import api as conductor_api
from nova import context
from nova import exception
from nova.network import model
from nova.network import neutronv2
from nova.network.neutronv2 import api as neutronapi
from nova.network.neutronv2 import constants
from nova.objects import instance as instance_obj
from nova.openstack.common import jsonutils
from nova.openstack.common import local
from nova import test
from nova import utils
CONF = cfg.CONF
# NOTE: Neutron client raises Exception, which is discouraged by HACKING.
# We set this variable here and use it for assertions below to avoid
# the hacking checks until we can make neutron client throw a custom
# exception class instead.
NEUTRON_CLIENT_EXCEPTION = Exception
class MyComparator(mox.Comparator):
def __init__(self, lhs):
self.lhs = lhs
def _com_dict(self, lhs, rhs):
if len(lhs) != len(rhs):
return False
for key, value in lhs.iteritems():
if key not in rhs:
return False
rhs_value = rhs[key]
if not self._com(value, rhs_value):
return False
return True
def _com_list(self, lhs, rhs):
if len(lhs) != len(rhs):
return False
for lhs_value in lhs:
if lhs_value not in rhs:
return False
return True
def _com(self, lhs, rhs):
if lhs is None:
return rhs is None
if isinstance(lhs, dict):
if not isinstance(rhs, dict):
return False
return self._com_dict(lhs, rhs)
if isinstance(lhs, list):
if not isinstance(rhs, list):
return False
return self._com_list(lhs, rhs)
if isinstance(lhs, tuple):
if not isinstance(rhs, tuple):
return False
return self._com_list(lhs, rhs)
return lhs == rhs
def equals(self, rhs):
return self._com(self.lhs, rhs)
def __repr__(self):
return str(self.lhs)
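# Illustrative use of MyComparator (mirroring the expectations set up in
# the tests below): it lets a mox expectation match a dict or list
# argument structurally rather than by identity, e.g.
#   moxed_client.update_port('port-id', MyComparator({'port': {...}}))
# matches any call whose argument compares equal element by element.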
class TestNeutronClient(test.TestCase):
def test_withtoken(self):
self.flags(neutron_url='http://anyhost/')
self.flags(neutron_url_timeout=30)
my_context = context.RequestContext('userid',
'my_tenantid',
auth_token='token')
self.mox.StubOutWithMock(client.Client, "__init__")
client.Client.__init__(
auth_strategy=CONF.neutron_auth_strategy,
endpoint_url=CONF.neutron_url,
token=my_context.auth_token,
timeout=CONF.neutron_url_timeout,
insecure=False,
ca_cert=None).AndReturn(None)
self.mox.ReplayAll()
neutronv2.get_client(my_context)
def test_withouttoken(self):
my_context = context.RequestContext('userid', 'my_tenantid')
self.assertRaises(exceptions.Unauthorized,
neutronv2.get_client,
my_context)
def test_withtoken_context_is_admin(self):
self.flags(neutron_url='http://anyhost/')
self.flags(neutron_url_timeout=30)
my_context = context.RequestContext('userid',
'my_tenantid',
auth_token='token',
is_admin=True)
self.mox.StubOutWithMock(client.Client, "__init__")
client.Client.__init__(
auth_strategy=CONF.neutron_auth_strategy,
endpoint_url=CONF.neutron_url,
token=my_context.auth_token,
timeout=CONF.neutron_url_timeout,
insecure=False,
ca_cert=None).AndReturn(None)
self.mox.ReplayAll()
# Note that although we have admin set in the context we
# are not asking for an admin client, and so we auth with
# our own token
neutronv2.get_client(my_context)
def test_withouttoken_keystone_connection_error(self):
self.flags(neutron_auth_strategy='keystone')
self.flags(neutron_url='http://anyhost/')
my_context = context.RequestContext('userid', 'my_tenantid')
self.assertRaises(NEUTRON_CLIENT_EXCEPTION,
neutronv2.get_client,
my_context)
class TestNeutronv2Base(test.TestCase):
def setUp(self):
super(TestNeutronv2Base, self).setUp()
self.context = context.RequestContext('userid', 'my_tenantid')
setattr(self.context,
'auth_token',
'bff4a5a6b9eb4ea2a6efec6eefb77936')
self.instance = {'project_id': '9d049e4b60b64716978ab415e6fbd5c0',
'uuid': str(uuid.uuid4()),
'display_name': 'test_instance',
'availability_zone': 'nova',
'host': 'some_host',
'security_groups': []}
self.instance2 = {'project_id': '9d049e4b60b64716978ab415e6fbd5c0',
'uuid': str(uuid.uuid4()),
'display_name': 'test_instance2',
'availability_zone': 'nova',
'security_groups': []}
self.nets1 = [{'id': 'my_netid1',
'name': 'my_netname1',
'subnets': ['mysubnid1'],
'tenant_id': 'my_tenantid'}]
self.nets2 = []
self.nets2.append(self.nets1[0])
self.nets2.append({'id': 'my_netid2',
'name': 'my_netname2',
'subnets': ['mysubnid2'],
'tenant_id': 'my_tenantid'})
self.nets3 = self.nets2 + [{'id': 'my_netid3',
'name': 'my_netname3',
'tenant_id': 'my_tenantid'}]
self.nets4 = [{'id': 'his_netid4',
'name': 'his_netname4',
'tenant_id': 'his_tenantid'}]
# A network request with external networks
self.nets5 = self.nets1 + [{'id': 'the-external-one',
'name': 'out-of-this-world',
'router:external': True,
'tenant_id': 'should-be-an-admin'}]
self.nets = [self.nets1, self.nets2, self.nets3,
self.nets4, self.nets5]
self.port_address = '10.0.1.2'
self.port_data1 = [{'network_id': 'my_netid1',
'device_id': self.instance2['uuid'],
'device_owner': 'compute:nova',
'id': 'my_portid1',
'status': 'DOWN',
'admin_state_up': True,
'fixed_ips': [{'ip_address': self.port_address,
'subnet_id': 'my_subid1'}],
'mac_address': 'my_mac1', }]
self.float_data1 = [{'port_id': 'my_portid1',
'fixed_ip_address': self.port_address,
'floating_ip_address': '172.0.1.2'}]
self.dhcp_port_data1 = [{'fixed_ips': [{'ip_address': '10.0.1.9',
'subnet_id': 'my_subid1'}],
'status': 'ACTIVE',
'admin_state_up': True}]
self.port_address2 = '10.0.2.2'
self.port_data2 = []
self.port_data2.append(self.port_data1[0])
self.port_data2.append({'network_id': 'my_netid2',
'device_id': self.instance['uuid'],
'admin_state_up': True,
'status': 'ACTIVE',
'device_owner': 'compute:nova',
'id': 'my_portid2',
'fixed_ips':
[{'ip_address': self.port_address2,
'subnet_id': 'my_subid2'}],
'mac_address': 'my_mac2', })
self.float_data2 = []
self.float_data2.append(self.float_data1[0])
self.float_data2.append({'port_id': 'my_portid2',
'fixed_ip_address': '10.0.2.2',
'floating_ip_address': '172.0.2.2'})
self.port_data3 = [{'network_id': 'my_netid1',
'device_id': 'device_id3',
'status': 'DOWN',
'admin_state_up': True,
'device_owner': 'compute:nova',
'id': 'my_portid3',
'fixed_ips': [], # no fixed ip
'mac_address': 'my_mac3', }]
self.subnet_data1 = [{'id': 'my_subid1',
'cidr': '10.0.1.0/24',
'network_id': 'my_netid1',
'gateway_ip': '10.0.1.1',
'dns_nameservers': ['8.8.1.1', '8.8.1.2']}]
self.subnet_data2 = []
self.subnet_data_n = [{'id': 'my_subid1',
'cidr': '10.0.1.0/24',
'network_id': 'my_netid1',
'gateway_ip': '10.0.1.1',
'dns_nameservers': ['8.8.1.1', '8.8.1.2']},
{'id': 'my_subid2',
'cidr': '20.0.1.0/24',
'network_id': 'my_netid2',
'gateway_ip': '20.0.1.1',
'dns_nameservers': ['8.8.1.1', '8.8.1.2']}]
self.subnet_data2.append({'id': 'my_subid2',
'cidr': '10.0.2.0/24',
'network_id': 'my_netid2',
'gateway_ip': '10.0.2.1',
'dns_nameservers': ['8.8.2.1', '8.8.2.2']})
self.fip_pool = {'id': '4fdbfd74-eaf8-4884-90d9-00bd6f10c2d3',
'name': 'ext_net',
'router:external': True,
'tenant_id': 'admin_tenantid'}
self.fip_pool_nova = {'id': '435e20c3-d9f1-4f1b-bee5-4611a1dd07db',
'name': 'nova',
'router:external': True,
'tenant_id': 'admin_tenantid'}
self.fip_unassociated = {'tenant_id': 'my_tenantid',
'id': 'fip_id1',
'floating_ip_address': '172.24.4.227',
'floating_network_id': self.fip_pool['id'],
'port_id': None,
'fixed_ip_address': None,
'router_id': None}
fixed_ip_address = self.port_data2[1]['fixed_ips'][0]['ip_address']
self.fip_associated = {'tenant_id': 'my_tenantid',
'id': 'fip_id2',
'floating_ip_address': '172.24.4.228',
'floating_network_id': self.fip_pool['id'],
'port_id': self.port_data2[1]['id'],
'fixed_ip_address': fixed_ip_address,
'router_id': 'router_id1'}
self._returned_nw_info = []
self.mox.StubOutWithMock(neutronv2, 'get_client')
self.moxed_client = self.mox.CreateMock(client.Client)
self.addCleanup(CONF.reset)
self.addCleanup(self.mox.VerifyAll)
self.addCleanup(self.mox.UnsetStubs)
self.addCleanup(self.stubs.UnsetAll)
def _stub_allocate_for_instance(self, net_idx=1, **kwargs):
api = neutronapi.API()
self.mox.StubOutWithMock(api, 'get_instance_nw_info')
has_portbinding = False
has_extra_dhcp_opts = False
dhcp_options = kwargs.get('dhcp_options')
if dhcp_options is not None:
has_extra_dhcp_opts = True
if kwargs.get('portbinding'):
has_portbinding = True
api.extensions[constants.PORTBINDING_EXT] = 1
self.mox.StubOutWithMock(api, '_refresh_neutron_extensions_cache')
neutronv2.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
self.moxed_client)
neutronv2.get_client(
mox.IgnoreArg(), admin=True).MultipleTimes().AndReturn(
self.moxed_client)
api._refresh_neutron_extensions_cache(mox.IgnoreArg())
else:
self.mox.StubOutWithMock(api, '_populate_neutron_extension_values')
self.mox.StubOutWithMock(api, '_has_port_binding_extension')
# Net idx is 1-based for compatibility with existing unit tests
nets = self.nets[net_idx - 1]
ports = {}
fixed_ips = {}
macs = kwargs.get('macs')
if macs:
macs = set(macs)
req_net_ids = []
if 'requested_networks' in kwargs:
for id, fixed_ip, port_id in kwargs['requested_networks']:
if port_id:
self.moxed_client.show_port(port_id).AndReturn(
{'port': {'id': 'my_portid1',
'network_id': 'my_netid1',
'mac_address': 'my_mac1',
'device_id': kwargs.get('_device') and
self.instance2['uuid'] or ''}})
ports['my_netid1'] = self.port_data1[0]
id = 'my_netid1'
if macs is not None:
macs.discard('my_mac1')
else:
fixed_ips[id] = fixed_ip
req_net_ids.append(id)
expected_network_order = req_net_ids
else:
expected_network_order = [n['id'] for n in nets]
if kwargs.get('_break') == 'pre_list_networks':
self.mox.ReplayAll()
return api
search_ids = [net['id'] for net in nets if net['id'] in req_net_ids]
if search_ids:
mox_list_params = {'id': mox.SameElementsAs(search_ids)}
self.moxed_client.list_networks(
**mox_list_params).AndReturn({'networks': nets})
else:
mox_list_params = {'tenant_id': self.instance['project_id'],
'shared': False}
self.moxed_client.list_networks(
**mox_list_params).AndReturn({'networks': nets})
mox_list_params = {'shared': True}
self.moxed_client.list_networks(
**mox_list_params).AndReturn({'networks': []})
ports_in_requested_net_order = []
for net_id in expected_network_order:
port_req_body = {
'port': {
'device_id': self.instance['uuid'],
'device_owner': 'compute:nova',
},
}
if has_portbinding:
port_req_body['port']['binding:host_id'] = (
self.instance.get('host'))
port = ports.get(net_id, None)
if not has_portbinding:
api._populate_neutron_extension_values(mox.IgnoreArg(),
self.instance, mox.IgnoreArg()).AndReturn(None)
else:
# since _populate_neutron_extension_values() will call
# _has_port_binding_extension()
api._has_port_binding_extension(mox.IgnoreArg()).\
AndReturn(has_portbinding)
api._has_port_binding_extension(mox.IgnoreArg()).\
AndReturn(has_portbinding)
if port:
port_id = port['id']
self.moxed_client.update_port(port_id,
MyComparator(port_req_body)
).AndReturn(
{'port': port})
ports_in_requested_net_order.append(port_id)
else:
fixed_ip = fixed_ips.get(net_id)
if fixed_ip:
port_req_body['port']['fixed_ips'] = [{'ip_address':
fixed_ip}]
port_req_body['port']['network_id'] = net_id
port_req_body['port']['admin_state_up'] = True
port_req_body['port']['tenant_id'] = \
self.instance['project_id']
if macs:
port_req_body['port']['mac_address'] = macs.pop()
if has_portbinding:
port_req_body['port']['binding:host_id'] = (
self.instance.get('host'))
res_port = {'port': {'id': 'fake'}}
if has_extra_dhcp_opts:
port_req_body['port']['extra_dhcp_opts'] = dhcp_options
if kwargs.get('_break') == 'mac' + net_id:
self.mox.ReplayAll()
return api
self.moxed_client.create_port(
MyComparator(port_req_body)).AndReturn(res_port)
ports_in_requested_net_order.append(res_port['port']['id'])
api.get_instance_nw_info(mox.IgnoreArg(),
self.instance,
networks=nets,
port_ids=ports_in_requested_net_order
).AndReturn(self._returned_nw_info)
self.mox.ReplayAll()
return api
def _verify_nw_info(self, nw_inf, index=0):
id_suffix = index + 1
self.assertEqual('10.0.%s.2' % id_suffix,
nw_inf.fixed_ips()[index]['address'])
self.assertEqual('172.0.%s.2' % id_suffix,
nw_inf.fixed_ips()[index].floating_ip_addresses()[0])
self.assertEqual('my_netname%s' % id_suffix,
nw_inf[index]['network']['label'])
self.assertEqual('my_portid%s' % id_suffix, nw_inf[index]['id'])
self.assertEqual('my_mac%s' % id_suffix, nw_inf[index]['address'])
self.assertEqual('10.0.%s.0/24' % id_suffix,
nw_inf[index]['network']['subnets'][0]['cidr'])
self.assertTrue(model.IP(address='8.8.%s.1' % id_suffix,
version=4, type='dns') in
nw_inf[index]['network']['subnets'][0]['dns'])
def _get_instance_nw_info(self, number):
api = neutronapi.API()
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
api.db.instance_info_cache_update(mox.IgnoreArg(),
self.instance['uuid'],
mox.IgnoreArg())
port_data = number == 1 and self.port_data1 or self.port_data2
nets = number == 1 and self.nets1 or self.nets2
net_info_cache = []
for port in port_data:
net_info_cache.append({"network": {"id": port['network_id']},
"id": port['id']})
instance = copy.copy(self.instance)
        # This line intentionally does not wrap net_info_cache in
        # jsonutils.dumps(), to exercise the code path where the cache
        # is not unicode.
instance['info_cache'] = {'network_info': net_info_cache}
self.moxed_client.list_ports(
tenant_id=self.instance['project_id'],
device_id=self.instance['uuid']).AndReturn(
{'ports': port_data})
net_ids = [port['network_id'] for port in port_data]
nets = number == 1 and self.nets1 or self.nets2
self.moxed_client.list_networks(
id=net_ids).AndReturn({'networks': nets})
for i in xrange(1, number + 1):
float_data = number == 1 and self.float_data1 or self.float_data2
for ip in port_data[i - 1]['fixed_ips']:
float_data = [x for x in float_data
if x['fixed_ip_address'] == ip['ip_address']]
self.moxed_client.list_floatingips(
fixed_ip_address=ip['ip_address'],
port_id=port_data[i - 1]['id']).AndReturn(
{'floatingips': float_data})
subnet_data = i == 1 and self.subnet_data1 or self.subnet_data2
self.moxed_client.list_subnets(
id=mox.SameElementsAs(['my_subid%s' % i])).AndReturn(
{'subnets': subnet_data})
self.moxed_client.list_ports(
network_id=subnet_data[0]['network_id'],
device_owner='network:dhcp').AndReturn(
{'ports': []})
self.mox.ReplayAll()
nw_inf = api.get_instance_nw_info(self.context, instance)
for i in xrange(0, number):
self._verify_nw_info(nw_inf, i)
def _allocate_for_instance(self, net_idx=1, **kwargs):
api = self._stub_allocate_for_instance(net_idx, **kwargs)
return api.allocate_for_instance(self.context, self.instance, **kwargs)
class TestNeutronv2(TestNeutronv2Base):
def setUp(self):
super(TestNeutronv2, self).setUp()
neutronv2.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
self.moxed_client)
def test_get_instance_nw_info_1(self):
# Test to get one port in one network and subnet.
neutronv2.get_client(mox.IgnoreArg(),
admin=True).MultipleTimes().AndReturn(
self.moxed_client)
self._get_instance_nw_info(1)
def test_get_instance_nw_info_2(self):
# Test to get one port in each of two networks and subnets.
neutronv2.get_client(mox.IgnoreArg(),
admin=True).MultipleTimes().AndReturn(
self.moxed_client)
self._get_instance_nw_info(2)
def test_get_instance_nw_info_with_nets_add_interface(self):
        # This tests that adding an interface to an instance does not
        # remove the first interface from the instance.
network_model = model.Network(id='network_id',
bridge='br-int',
injected='injected',
label='fake_network',
tenant_id='fake_tenant')
network_cache = {'info_cache': {
'network_info': [{'id': self.port_data2[0]['id'],
'address': 'mac_address',
'network': network_model,
'type': 'ovs',
'ovs_interfaceid': 'ovs_interfaceid',
'devname': 'devname'}]}}
self._fake_get_instance_nw_info_helper(network_cache,
self.port_data2,
self.nets2,
[self.port_data2[1]['id']])
def test_get_instance_nw_info_remove_ports_from_neutron(self):
# This tests that when a port is removed in neutron it
        # is also removed from nova.
network_model = model.Network(id=self.port_data2[0]['network_id'],
bridge='br-int',
injected='injected',
label='fake_network',
tenant_id='fake_tenant')
network_cache = {'info_cache': {
'network_info': [{'id': 'network_id',
'address': 'mac_address',
'network': network_model,
'type': 'ovs',
'ovs_interfaceid': 'ovs_interfaceid',
'devname': 'devname'}]}}
self._fake_get_instance_nw_info_helper(network_cache,
self.port_data2,
None,
None)
    def test_get_instance_nw_info_ignores_neutron_ports(self):
# Tests that only ports in the network_cache are updated
# and ports returned from neutron that match the same
# instance_id/device_id are ignored.
port_data2 = copy.copy(self.port_data2)
# set device_id on the ports to be the same.
port_data2[1]['device_id'] = port_data2[0]['device_id']
network_model = model.Network(id='network_id',
bridge='br-int',
injected='injected',
label='fake_network',
tenant_id='fake_tenant')
network_cache = {'info_cache': {
'network_info': [{'id': 'network_id',
'address': 'mac_address',
'network': network_model,
'type': 'ovs',
'ovs_interfaceid': 'ovs_interfaceid',
'devname': 'devname'}]}}
self._fake_get_instance_nw_info_helper(network_cache,
port_data2,
None,
None)
def _fake_get_instance_nw_info_helper(self, network_cache,
current_neutron_ports,
networks=None, port_ids=None):
"""Helper function to test get_instance_nw_info.
:param network_cache - data already in the nova network cache.
:param current_neutron_ports - updated list of ports from neutron.
:param networks - networks of ports being added to instance.
:param port_ids - new ports being added to instance.
"""
# keep a copy of the original ports/networks to pass to
# get_instance_nw_info() as the code below changes them.
original_port_ids = copy.copy(port_ids)
original_networks = copy.copy(networks)
api = neutronapi.API()
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
api.db.instance_info_cache_update(
mox.IgnoreArg(),
self.instance['uuid'], mox.IgnoreArg())
neutronv2.get_client(mox.IgnoreArg(),
admin=True).MultipleTimes().AndReturn(
self.moxed_client)
self.moxed_client.list_ports(
tenant_id=self.instance['project_id'],
device_id=self.instance['uuid']).AndReturn(
{'ports': current_neutron_ports})
ifaces = network_cache['info_cache']['network_info']
if port_ids is None:
port_ids = [iface['id'] for iface in ifaces]
net_ids = [iface['network']['id'] for iface in ifaces]
nets = [{'id': iface['network']['id'],
'name': iface['network']['label'],
'tenant_id': iface['network']['meta']['tenant_id']}
for iface in ifaces]
if networks is None:
self.moxed_client.list_networks(
id=net_ids).AndReturn({'networks': nets})
else:
networks = networks + [
dict(id=iface['network']['id'],
name=iface['network']['label'],
tenant_id=iface['network']['meta']['tenant_id'])
for iface in ifaces]
port_ids = [iface['id'] for iface in ifaces] + port_ids
index = 0
current_neutron_port_map = {}
for current_neutron_port in current_neutron_ports:
current_neutron_port_map[current_neutron_port['id']] = (
current_neutron_port)
for port_id in port_ids:
current_neutron_port = current_neutron_port_map.get(port_id)
if current_neutron_port:
for ip in current_neutron_port['fixed_ips']:
self.moxed_client.list_floatingips(
fixed_ip_address=ip['ip_address'],
port_id=current_neutron_port['id']).AndReturn(
{'floatingips': [self.float_data2[index]]})
self.moxed_client.list_subnets(
id=mox.SameElementsAs([ip['subnet_id']])
).AndReturn(
{'subnets': [self.subnet_data_n[index]]})
self.moxed_client.list_ports(
network_id=current_neutron_port['network_id'],
device_owner='network:dhcp').AndReturn(
{'ports': self.dhcp_port_data1})
index += 1
self.mox.ReplayAll()
self.instance['info_cache'] = network_cache
instance = copy.copy(self.instance)
instance['info_cache'] = network_cache['info_cache']
nw_infs = api.get_instance_nw_info(self.context,
instance,
networks=original_networks,
port_ids=original_port_ids)
self.assertEqual(index, len(nw_infs))
# ensure that nic ordering is preserved
for iface_index in range(index):
self.assertEqual(nw_infs[iface_index]['id'],
port_ids[iface_index])
def test_get_instance_nw_info_without_subnet(self):
# Test get instance_nw_info for a port without subnet.
api = neutronapi.API()
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
api.db.instance_info_cache_update(
mox.IgnoreArg(),
self.instance['uuid'], mox.IgnoreArg())
self.moxed_client.list_ports(
tenant_id=self.instance['project_id'],
device_id=self.instance['uuid']).AndReturn(
{'ports': self.port_data3})
self.moxed_client.list_networks(
id=[self.port_data1[0]['network_id']]).AndReturn(
{'networks': self.nets1})
neutronv2.get_client(mox.IgnoreArg(),
admin=True).MultipleTimes().AndReturn(
self.moxed_client)
self.mox.StubOutWithMock(conductor_api.API,
'instance_get_by_uuid')
net_info_cache = []
for port in self.port_data3:
net_info_cache.append({"network": {"id": port['network_id']},
"id": port['id']})
instance = copy.copy(self.instance)
instance['info_cache'] = {'network_info':
six.text_type(
jsonutils.dumps(net_info_cache))}
self.mox.ReplayAll()
nw_inf = api.get_instance_nw_info(self.context,
instance)
id_suffix = 3
self.assertEqual(0, len(nw_inf.fixed_ips()))
self.assertEqual('my_netname1', nw_inf[0]['network']['label'])
self.assertEqual('my_portid%s' % id_suffix, nw_inf[0]['id'])
self.assertEqual('my_mac%s' % id_suffix, nw_inf[0]['address'])
self.assertEqual(0, len(nw_inf[0]['network']['subnets']))
def test_refresh_neutron_extensions_cache(self):
api = neutronapi.API()
# Note: Don't want the default get_client from setUp()
self.mox.ResetAll()
neutronv2.get_client(mox.IgnoreArg()).AndReturn(
self.moxed_client)
self.moxed_client.list_extensions().AndReturn(
{'extensions': [{'name': 'nvp-qos'}]})
self.mox.ReplayAll()
api._refresh_neutron_extensions_cache(mox.IgnoreArg())
self.assertEqual({'nvp-qos': {'name': 'nvp-qos'}}, api.extensions)
def test_populate_neutron_extension_values_rxtx_factor(self):
api = neutronapi.API()
# Note: Don't want the default get_client from setUp()
self.mox.ResetAll()
neutronv2.get_client(mox.IgnoreArg()).AndReturn(
self.moxed_client)
self.moxed_client.list_extensions().AndReturn(
{'extensions': [{'name': 'nvp-qos'}]})
self.mox.ReplayAll()
flavor = flavors.get_default_flavor()
flavor['rxtx_factor'] = 1
sys_meta = utils.dict_to_metadata(
flavors.save_flavor_info({}, flavor))
instance = {'system_metadata': sys_meta}
port_req_body = {'port': {}}
api._populate_neutron_extension_values(self.context, instance,
port_req_body)
self.assertEqual(port_req_body['port']['rxtx_factor'], 1)
def test_allocate_for_instance_1(self):
# Allocate one port in one network env.
self._allocate_for_instance(1)
def test_allocate_for_instance_2(self):
# Allocate one port in two networks env.
self._allocate_for_instance(2)
def test_allocate_for_instance_accepts_macs_kwargs_None(self):
# The macs kwarg should be accepted as None.
self._allocate_for_instance(1, macs=None)
def test_allocate_for_instance_accepts_macs_kwargs_set(self):
        # The macs kwarg should be accepted as a set; the
        # _allocate_for_instance helper checks that the mac is used to
        # create a port.
self._allocate_for_instance(1, macs=set(['ab:cd:ef:01:23:45']))
def test_allocate_for_instance_accepts_only_portid(self):
# Make sure allocate_for_instance works when only a portid is provided
self._returned_nw_info = self.port_data1
result = self._allocate_for_instance(
requested_networks=[(None, None, 'my_portid1')])
self.assertEqual(self.port_data1, result)
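    # Clarifying sketch (an assumption inferred from the tuples used in these
    # tests): each requested_networks entry is a 3-tuple of
    # (network_id, fixed_ip, port_id), with None for unused slots, e.g.:
    #
    #     requested_networks = [
    #         ('my_netid1', None, None),           # pick a network
    #         ('my_netid1', '10.0.1.0/24', None),  # network plus fixed IP
    #         (None, None, 'my_portid1'),          # pre-created port
    #     ]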
def test_allocate_for_instance_not_enough_macs_via_ports(self):
        # Using a hypervisor MAC for a pre-created port stops that MAC from
        # being used to dynamically create a port on a network. We put the
        # network first in requested_networks so that, if the code did not
        # pre-check requested ports, it would incorrectly assign the mac and
        # not fail.
requested_networks = [
(self.nets2[1]['id'], None, None),
(None, None, 'my_portid1')]
api = self._stub_allocate_for_instance(
net_idx=2, requested_networks=requested_networks,
macs=set(['my_mac1']),
_break='mac' + self.nets2[1]['id'])
self.assertRaises(exception.PortNotFree,
api.allocate_for_instance, self.context,
self.instance, requested_networks=requested_networks,
macs=set(['my_mac1']))
def test_allocate_for_instance_not_enough_macs(self):
# If not enough MAC addresses are available to allocate to networks, an
# error should be raised.
# We could pass in macs=set(), but that wouldn't tell us that
# allocate_for_instance tracks used macs properly, so we pass in one
# mac, and ask for two networks.
requested_networks = [
(self.nets2[1]['id'], None, None),
(self.nets2[0]['id'], None, None)]
api = self._stub_allocate_for_instance(
net_idx=2, requested_networks=requested_networks,
macs=set(['my_mac2']),
_break='mac' + self.nets2[0]['id'])
self.assertRaises(exception.PortNotFree,
api.allocate_for_instance, self.context,
self.instance, requested_networks=requested_networks,
macs=set(['my_mac2']))
def test_allocate_for_instance_two_macs_two_networks(self):
# If two MACs are available and two networks requested, two new ports
# get made and no exceptions raised.
requested_networks = [
(self.nets2[1]['id'], None, None),
(self.nets2[0]['id'], None, None)]
self._allocate_for_instance(
net_idx=2, requested_networks=requested_networks,
macs=set(['my_mac2', 'my_mac1']))
def test_allocate_for_instance_mac_conflicting_requested_port(self):
        # Request a single pre-created port while supplying a MAC that does
        # not match that port.
        requested_networks = [(None, None, 'my_portid1')]
api = self._stub_allocate_for_instance(
net_idx=1, requested_networks=requested_networks,
macs=set(['unknown:mac']),
_break='pre_list_networks')
self.assertRaises(exception.PortNotUsable,
api.allocate_for_instance, self.context,
self.instance, requested_networks=requested_networks,
macs=set(['unknown:mac']))
def test_allocate_for_instance_with_requested_networks(self):
        # Request the networks in a specific, non-default order.
requested_networks = [
(net['id'], None, None)
for net in (self.nets3[1], self.nets3[0], self.nets3[2])]
self._allocate_for_instance(net_idx=3,
requested_networks=requested_networks)
def test_allocate_for_instance_with_requested_networks_with_fixedip(self):
        # Request a specific fixed IP on the first network.
requested_networks = [(self.nets1[0]['id'], '10.0.1.0/24', None)]
self._allocate_for_instance(net_idx=1,
requested_networks=requested_networks)
def test_allocate_for_instance_with_requested_networks_with_port(self):
        requested_networks = [(None, None, 'my_portid1')]
self._allocate_for_instance(net_idx=1,
requested_networks=requested_networks)
def test_allocate_for_instance_no_networks(self):
"""verify the exception thrown when there are no networks defined."""
api = neutronapi.API()
self.moxed_client.list_networks(
tenant_id=self.instance['project_id'],
shared=False).AndReturn(
{'networks': model.NetworkInfo([])})
self.moxed_client.list_networks(shared=True).AndReturn(
{'networks': model.NetworkInfo([])})
self.mox.ReplayAll()
nwinfo = api.allocate_for_instance(self.context, self.instance)
self.assertEqual(len(nwinfo), 0)
def test_allocate_for_instance_ex1(self):
"""verify we will delete created ports
if we fail to allocate all net resources.
Mox to raise exception when creating a second port.
In this case, the code should delete the first created port.
"""
api = neutronapi.API()
self.mox.StubOutWithMock(api, '_populate_neutron_extension_values')
self.mox.StubOutWithMock(api, '_has_port_binding_extension')
api._has_port_binding_extension(mox.IgnoreArg()).MultipleTimes().\
AndReturn(False)
self.moxed_client.list_networks(
tenant_id=self.instance['project_id'],
shared=False).AndReturn(
{'networks': self.nets2})
self.moxed_client.list_networks(shared=True).AndReturn(
{'networks': []})
index = 0
for network in self.nets2:
binding_port_req_body = {
'port': {
'device_id': self.instance['uuid'],
'device_owner': 'compute:nova',
},
}
port_req_body = {
'port': {
'network_id': network['id'],
'admin_state_up': True,
'tenant_id': self.instance['project_id'],
},
}
port_req_body['port'].update(binding_port_req_body['port'])
port = {'id': 'portid_' + network['id']}
api._populate_neutron_extension_values(self.context,
self.instance, binding_port_req_body).AndReturn(None)
if index == 0:
self.moxed_client.create_port(
MyComparator(port_req_body)).AndReturn({'port': port})
else:
NeutronOverQuota = exceptions.NeutronClientException(
message="Quota exceeded for resources: ['port']",
status_code=409)
self.moxed_client.create_port(
MyComparator(port_req_body)).AndRaise(NeutronOverQuota)
index += 1
self.moxed_client.delete_port('portid_' + self.nets2[0]['id'])
self.mox.ReplayAll()
self.assertRaises(exception.PortLimitExceeded,
api.allocate_for_instance,
self.context, self.instance)
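    # Hedged sketch of the rollback pattern exercised above (illustrative
    # only; names are hypothetical and this is not the real nova code):
    #
    #     created = []
    #     try:
    #         for net in nets:
    #             created.append(client.create_port(body)['port']['id'])
    #     except Exception:
    #         for port_id in created:
    #             client.delete_port(port_id)
    #         raise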
def test_allocate_for_instance_ex2(self):
"""verify we have no port to delete
if we fail to allocate the first net resource.
Mox to raise exception when creating the first port.
In this case, the code should not delete any ports.
"""
api = neutronapi.API()
self.mox.StubOutWithMock(api, '_populate_neutron_extension_values')
self.mox.StubOutWithMock(api, '_has_port_binding_extension')
api._has_port_binding_extension(mox.IgnoreArg()).MultipleTimes().\
AndReturn(False)
self.moxed_client.list_networks(
tenant_id=self.instance['project_id'],
shared=False).AndReturn(
{'networks': self.nets2})
self.moxed_client.list_networks(shared=True).AndReturn(
{'networks': []})
binding_port_req_body = {
'port': {
'device_id': self.instance['uuid'],
'device_owner': 'compute:nova',
},
}
port_req_body = {
'port': {
'network_id': self.nets2[0]['id'],
'admin_state_up': True,
'device_id': self.instance['uuid'],
'tenant_id': self.instance['project_id'],
},
}
api._populate_neutron_extension_values(self.context,
self.instance, binding_port_req_body).AndReturn(None)
self.moxed_client.create_port(
MyComparator(port_req_body)).AndRaise(
Exception("fail to create port"))
self.mox.ReplayAll()
self.assertRaises(NEUTRON_CLIENT_EXCEPTION, api.allocate_for_instance,
self.context, self.instance)
def test_allocate_for_instance_no_port_or_network(self):
class BailOutEarly(Exception):
pass
api = neutronapi.API()
self.mox.StubOutWithMock(api, '_get_available_networks')
# Make sure we get an empty list and then bail out of the rest
# of the function
api._get_available_networks(self.context, self.instance['project_id'],
[]).AndRaise(BailOutEarly)
self.mox.ReplayAll()
self.assertRaises(BailOutEarly,
api.allocate_for_instance,
self.context, self.instance,
requested_networks=[(None, None, None)])
def test_allocate_for_instance_second_time(self):
# Make sure that allocate_for_instance only returns ports that it
# allocated during _that_ run.
new_port = {'id': 'fake'}
self._returned_nw_info = self.port_data1 + [new_port]
nw_info = self._allocate_for_instance()
self.assertEqual(nw_info, [new_port])
def test_allocate_for_instance_port_in_use(self):
# If a port is already in use, an exception should be raised.
requested_networks = [(None, None, 'my_portid1')]
api = self._stub_allocate_for_instance(
requested_networks=requested_networks,
_break='pre_list_networks',
_device=True)
self.assertRaises(exception.PortInUse,
api.allocate_for_instance, self.context,
self.instance, requested_networks=requested_networks)
def _deallocate_for_instance(self, number, requested_networks=None):
api = neutronapi.API()
        port_data = self.port_data1 if number == 1 else self.port_data2
ret_data = copy.deepcopy(port_data)
if requested_networks:
for net, fip, port in requested_networks:
ret_data.append({'network_id': net,
'device_id': self.instance['uuid'],
'device_owner': 'compute:nova',
'id': port,
'status': 'DOWN',
'admin_state_up': True,
'fixed_ips': [],
'mac_address': 'fake_mac', })
self.moxed_client.list_ports(
device_id=self.instance['uuid']).AndReturn(
{'ports': ret_data})
if requested_networks:
for net, fip, port in requested_networks:
self.moxed_client.update_port(port)
for port in reversed(port_data):
self.moxed_client.delete_port(port['id'])
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
api.db.instance_info_cache_update(self.context,
self.instance['uuid'],
{'network_info': '[]'})
self.mox.ReplayAll()
api = neutronapi.API()
api.deallocate_for_instance(self.context, self.instance,
requested_networks=requested_networks)
def test_deallocate_for_instance_1_with_requested(self):
requested = [('fake-net', 'fake-fip', 'fake-port')]
# Test to deallocate in one port env.
self._deallocate_for_instance(1, requested_networks=requested)
def test_deallocate_for_instance_2_with_requested(self):
requested = [('fake-net', 'fake-fip', 'fake-port')]
        # Test to deallocate in two ports env.
self._deallocate_for_instance(2, requested_networks=requested)
def test_deallocate_for_instance_1(self):
# Test to deallocate in one port env.
self._deallocate_for_instance(1)
def test_deallocate_for_instance_2(self):
# Test to deallocate in two ports env.
self._deallocate_for_instance(2)
def test_deallocate_for_instance_port_not_found(self):
port_data = self.port_data1
self.moxed_client.list_ports(
device_id=self.instance['uuid']).AndReturn(
{'ports': port_data})
NeutronNotFound = neutronv2.exceptions.NeutronClientException(
status_code=404)
for port in reversed(port_data):
self.moxed_client.delete_port(port['id']).AndRaise(
NeutronNotFound)
self.mox.ReplayAll()
api = neutronapi.API()
api.deallocate_for_instance(self.context, self.instance)
def _test_deallocate_port_for_instance(self, number):
        port_data = self.port_data1 if number == 1 else self.port_data2
        nets = self.nets1 if number == 1 else self.nets2
self.moxed_client.delete_port(port_data[0]['id'])
net_info_cache = []
for port in port_data:
net_info_cache.append({"network": {"id": port['network_id']},
"id": port['id']})
instance = copy.copy(self.instance)
instance['info_cache'] = {'network_info':
six.text_type(
jsonutils.dumps(net_info_cache))}
api = neutronapi.API()
neutronv2.get_client(mox.IgnoreArg(), admin=True).AndReturn(
self.moxed_client)
self.moxed_client.list_ports(
tenant_id=self.instance['project_id'],
device_id=self.instance['uuid']).AndReturn(
{'ports': port_data[1:]})
neutronv2.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
self.moxed_client)
net_ids = [port['network_id'] for port in port_data]
self.moxed_client.list_networks(id=net_ids).AndReturn(
{'networks': nets})
        float_data = self.float_data1 if number == 1 else self.float_data2
for data in port_data[1:]:
for ip in data['fixed_ips']:
self.moxed_client.list_floatingips(
fixed_ip_address=ip['ip_address'],
port_id=data['id']).AndReturn(
{'floatingips': float_data[1:]})
for port in port_data[1:]:
self.moxed_client.list_subnets(id=['my_subid2']).AndReturn({})
self.mox.ReplayAll()
nwinfo = api.deallocate_port_for_instance(self.context, instance,
port_data[0]['id'])
self.assertEqual(len(nwinfo), len(port_data[1:]))
if len(port_data) > 1:
self.assertEqual(nwinfo[0]['network']['id'], 'my_netid2')
def test_deallocate_port_for_instance_1(self):
# Test to deallocate the first and only port
self._test_deallocate_port_for_instance(1)
def test_deallocate_port_for_instance_2(self):
# Test to deallocate the first port of two
self._test_deallocate_port_for_instance(2)
def test_list_ports(self):
search_opts = {'parm': 'value'}
self.moxed_client.list_ports(**search_opts)
self.mox.ReplayAll()
neutronapi.API().list_ports(self.context, **search_opts)
def test_show_port(self):
self.moxed_client.show_port('foo')
self.mox.ReplayAll()
neutronapi.API().show_port(self.context, 'foo')
def test_validate_networks(self):
requested_networks = [('my_netid1', 'test', None),
('my_netid2', 'test2', None)]
ids = ['my_netid1', 'my_netid2']
self.moxed_client.list_networks(
id=mox.SameElementsAs(ids)).AndReturn(
{'networks': self.nets2})
self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
{'ports': []})
self.moxed_client.show_quota(
tenant_id='my_tenantid').AndReturn(
{'quota': {'port': 50}})
self.mox.ReplayAll()
api = neutronapi.API()
api.validate_networks(self.context, requested_networks, 1)
def test_validate_networks_without_port_quota_on_network_side(self):
requested_networks = [('my_netid1', None, None),
('my_netid2', None, None)]
ids = ['my_netid1', 'my_netid2']
self.moxed_client.list_networks(
id=mox.SameElementsAs(ids)).AndReturn(
{'networks': self.nets2})
self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
{'ports': []})
self.moxed_client.show_quota(
tenant_id='my_tenantid').AndReturn(
{'quota': {}})
self.mox.ReplayAll()
api = neutronapi.API()
api.validate_networks(self.context, requested_networks, 1)
def test_validate_networks_ex_1(self):
requested_networks = [('my_netid1', 'test', None)]
self.moxed_client.list_networks(
id=mox.SameElementsAs(['my_netid1'])).AndReturn(
{'networks': self.nets1})
self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
{'ports': []})
self.moxed_client.show_quota(
tenant_id='my_tenantid').AndReturn(
{'quota': {'port': 50}})
self.mox.ReplayAll()
api = neutronapi.API()
try:
api.validate_networks(self.context, requested_networks, 1)
except exception.NetworkNotFound as ex:
self.assertIn("my_netid2", str(ex))
def test_validate_networks_ex_2(self):
requested_networks = [('my_netid1', 'test', None),
('my_netid2', 'test2', None),
('my_netid3', 'test3', None)]
ids = ['my_netid1', 'my_netid2', 'my_netid3']
self.moxed_client.list_networks(
id=mox.SameElementsAs(ids)).AndReturn(
{'networks': self.nets1})
self.mox.ReplayAll()
api = neutronapi.API()
try:
api.validate_networks(self.context, requested_networks, 1)
except exception.NetworkNotFound as ex:
self.assertIn("my_netid2, my_netid3", str(ex))
def test_validate_networks_duplicate(self):
"""Verify that the correct exception is thrown when duplicate
network ids are passed to validate_networks.
"""
requested_networks = [('my_netid1', None, None),
('my_netid1', None, None)]
self.mox.ReplayAll()
# Expected call from setUp.
neutronv2.get_client(None)
api = neutronapi.API()
self.assertRaises(exception.NetworkDuplicated,
api.validate_networks,
self.context, requested_networks, 1)
def test_validate_networks_not_specified(self):
requested_networks = []
self.moxed_client.list_networks(
tenant_id=self.context.project_id,
shared=False).AndReturn(
{'networks': self.nets1})
self.moxed_client.list_networks(
shared=True).AndReturn(
{'networks': self.nets2})
self.mox.ReplayAll()
api = neutronapi.API()
self.assertRaises(exception.NetworkAmbiguous,
api.validate_networks,
self.context, requested_networks, 1)
def test_validate_networks_port_not_found(self):
        # Verify that the correct exception is thrown when a nonexistent
        # port is passed to validate_networks.
requested_networks = [('my_netid1', None, '3123-ad34-bc43-32332ca33e')]
NeutronNotFound = neutronv2.exceptions.NeutronClientException(
status_code=404)
self.moxed_client.show_port(requested_networks[0][2]).AndRaise(
NeutronNotFound)
self.mox.ReplayAll()
# Expected call from setUp.
neutronv2.get_client(None)
api = neutronapi.API()
self.assertRaises(exception.PortNotFound,
api.validate_networks,
self.context, requested_networks, 1)
    def test_validate_networks_port_show_raises_non404(self):
        # Verify that a non-404 NeutronClientException raised by show_port
        # is propagated by validate_networks instead of being swallowed.
        requested_networks = [('my_netid1', None, '3123-ad34-bc43-32332ca33e')]
        NeutronError = neutronv2.exceptions.NeutronClientException(
            status_code=0)
        self.moxed_client.show_port(requested_networks[0][2]).AndRaise(
            NeutronError)
self.mox.ReplayAll()
# Expected call from setUp.
neutronv2.get_client(None)
api = neutronapi.API()
self.assertRaises(neutronv2.exceptions.NeutronClientException,
api.validate_networks,
self.context, requested_networks, 1)
def test_validate_networks_port_in_use(self):
requested_networks = [(None, None, self.port_data3[0]['id'])]
self.moxed_client.show_port(self.port_data3[0]['id']).\
AndReturn({'port': self.port_data3[0]})
self.mox.ReplayAll()
api = neutronapi.API()
self.assertRaises(exception.PortInUse,
api.validate_networks,
self.context, requested_networks, 1)
def test_validate_networks_port_no_subnet_id(self):
port_a = self.port_data3[0]
port_a['device_id'] = None
port_a['device_owner'] = None
requested_networks = [(None, None, port_a['id'])]
self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a})
self.mox.ReplayAll()
api = neutronapi.API()
self.assertRaises(exception.PortRequiresFixedIP,
api.validate_networks,
self.context, requested_networks, 1)
def test_validate_networks_no_subnet_id(self):
requested_networks = [('his_netid4', None, None)]
ids = ['his_netid4']
self.moxed_client.list_networks(
id=mox.SameElementsAs(ids)).AndReturn(
{'networks': self.nets4})
self.mox.ReplayAll()
api = neutronapi.API()
self.assertRaises(exception.NetworkRequiresSubnet,
api.validate_networks,
self.context, requested_networks, 1)
def test_validate_networks_ports_in_same_network(self):
port_a = self.port_data3[0]
port_a['fixed_ips'] = {'ip_address': '10.0.0.2',
'subnet_id': 'subnet_id'}
port_b = self.port_data1[0]
self.assertEqual(port_a['network_id'], port_b['network_id'])
for port in [port_a, port_b]:
port['device_id'] = None
port['device_owner'] = None
requested_networks = [(None, None, port_a['id']),
(None, None, port_b['id'])]
self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a})
self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
self.mox.ReplayAll()
api = neutronapi.API()
self.assertRaises(exception.NetworkDuplicated,
api.validate_networks,
self.context, requested_networks, 1)
def test_validate_networks_ports_not_in_same_network(self):
port_a = self.port_data3[0]
port_a['fixed_ips'] = {'ip_address': '10.0.0.2',
'subnet_id': 'subnet_id'}
port_b = self.port_data2[1]
self.assertNotEqual(port_a['network_id'], port_b['network_id'])
for port in [port_a, port_b]:
port['device_id'] = None
port['device_owner'] = None
requested_networks = [(None, None, port_a['id']),
(None, None, port_b['id'])]
self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a})
self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
search_opts = {'id': [port_a['network_id'], port_b['network_id']]}
self.moxed_client.list_networks(
**search_opts).AndReturn({'networks': self.nets2})
self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
{'ports': []})
self.moxed_client.show_quota(
tenant_id='my_tenantid').AndReturn(
{'quota': {'port': 50}})
self.mox.ReplayAll()
api = neutronapi.API()
api.validate_networks(self.context, requested_networks, 1)
def test_validate_networks_no_quota(self):
# Test validation for a request for one instance needing
# two ports, where the quota is 2 and 2 ports are in use
# => instances which can be created = 0
requested_networks = [('my_netid1', 'test', None),
('my_netid2', 'test2', None)]
ids = ['my_netid1', 'my_netid2']
self.moxed_client.list_networks(
id=mox.SameElementsAs(ids)).AndReturn(
{'networks': self.nets2})
self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
{'ports': self.port_data2})
self.moxed_client.show_quota(
tenant_id='my_tenantid').AndReturn(
{'quota': {'port': 2}})
self.mox.ReplayAll()
api = neutronapi.API()
max_count = api.validate_networks(self.context,
requested_networks, 1)
self.assertEqual(max_count, 0)
def test_validate_networks_some_quota(self):
        # Test validation for a request for two instances needing
# two ports each, where the quota is 5 and 2 ports are in use
# => instances which can be created = 1
requested_networks = [('my_netid1', 'test', None),
('my_netid2', 'test2', None)]
ids = ['my_netid1', 'my_netid2']
self.moxed_client.list_networks(
id=mox.SameElementsAs(ids)).AndReturn(
{'networks': self.nets2})
self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
{'ports': self.port_data2})
self.moxed_client.show_quota(
tenant_id='my_tenantid').AndReturn(
{'quota': {'port': 5}})
self.mox.ReplayAll()
api = neutronapi.API()
max_count = api.validate_networks(self.context,
requested_networks, 2)
self.assertEqual(max_count, 1)
def test_validate_networks_unlimited_quota(self):
        # Test validation for a request for two instances needing
        # two ports each, where the quota is -1 (unlimited)
        # => instances which can be created = 2
requested_networks = [('my_netid1', 'test', None),
('my_netid2', 'test2', None)]
ids = ['my_netid1', 'my_netid2']
self.moxed_client.list_networks(
id=mox.SameElementsAs(ids)).AndReturn(
{'networks': self.nets2})
self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
{'ports': self.port_data2})
self.moxed_client.show_quota(
tenant_id='my_tenantid').AndReturn(
{'quota': {'port': -1}})
self.mox.ReplayAll()
api = neutronapi.API()
max_count = api.validate_networks(self.context,
requested_networks, 2)
self.assertEqual(max_count, 2)
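    # Worked example of the quota arithmetic covered by the three tests above
    # (added for clarity; variable names are illustrative):
    #
    #     free = quota - ports_in_use               # e.g. 2 - 2 = 0, 5 - 2 = 3
    #     max_count = free // ports_per_instance    # 0 // 2 = 0, 3 // 2 = 1
    #
    # A quota of -1 means unlimited, so max_count equals the requested
    # instance count (2).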
def test_validate_networks_no_quota_but_ports_supplied(self):
        # Test validation for a request for one instance needing
        # two ports, where the quota is 2 and 2 ports are in use
        # but the request supplies its own pre-created ports
        # => instances which can be created = 1
port_a = self.port_data3[0]
port_a['fixed_ips'] = {'ip_address': '10.0.0.2',
'subnet_id': 'subnet_id'}
port_b = self.port_data2[1]
self.assertNotEqual(port_a['network_id'], port_b['network_id'])
for port in [port_a, port_b]:
port['device_id'] = None
port['device_owner'] = None
requested_networks = [(None, None, port_a['id']),
(None, None, port_b['id'])]
self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a})
self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
search_opts = {'id': [port_a['network_id'], port_b['network_id']]}
self.moxed_client.list_networks(
**search_opts).AndReturn({'networks': self.nets2})
self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
{'ports': self.port_data2})
self.moxed_client.show_quota(
tenant_id='my_tenantid').AndReturn(
{'quota': {'port': 2}})
self.mox.ReplayAll()
api = neutronapi.API()
max_count = api.validate_networks(self.context,
requested_networks, 1)
self.assertEqual(max_count, 1)
def _mock_list_ports(self, port_data=None):
if port_data is None:
port_data = self.port_data2
address = self.port_address
self.moxed_client.list_ports(
fixed_ips=MyComparator('ip_address=%s' % address)).AndReturn(
{'ports': port_data})
self.mox.ReplayAll()
return address
def test_get_instance_uuids_by_ip_filter(self):
self._mock_list_ports()
filters = {'ip': '^10\\.0\\.1\\.2$'}
api = neutronapi.API()
result = api.get_instance_uuids_by_ip_filter(self.context, filters)
self.assertEqual(self.instance2['uuid'], result[0]['instance_uuid'])
self.assertEqual(self.instance['uuid'], result[1]['instance_uuid'])
def test_get_fixed_ip_by_address_fails_for_no_ports(self):
address = self._mock_list_ports(port_data=[])
api = neutronapi.API()
self.assertRaises(exception.FixedIpNotFoundForAddress,
api.get_fixed_ip_by_address,
self.context, address)
def test_get_fixed_ip_by_address_succeeds_for_1_port(self):
address = self._mock_list_ports(port_data=self.port_data1)
api = neutronapi.API()
result = api.get_fixed_ip_by_address(self.context, address)
self.assertEqual(self.instance2['uuid'], result['instance_uuid'])
def test_get_fixed_ip_by_address_fails_for_more_than_1_port(self):
address = self._mock_list_ports()
api = neutronapi.API()
self.assertRaises(exception.FixedIpAssociatedWithMultipleInstances,
api.get_fixed_ip_by_address,
self.context, address)
def _get_available_networks(self, prv_nets, pub_nets,
req_ids=None, context=None):
api = neutronapi.API()
nets = prv_nets + pub_nets
if req_ids:
mox_list_params = {'id': req_ids}
self.moxed_client.list_networks(
**mox_list_params).AndReturn({'networks': nets})
else:
mox_list_params = {'tenant_id': self.instance['project_id'],
'shared': False}
self.moxed_client.list_networks(
**mox_list_params).AndReturn({'networks': prv_nets})
mox_list_params = {'shared': True}
self.moxed_client.list_networks(
**mox_list_params).AndReturn({'networks': pub_nets})
self.mox.ReplayAll()
rets = api._get_available_networks(
context if context else self.context,
self.instance['project_id'],
req_ids)
self.assertEqual(rets, nets)
def test_get_available_networks_all_private(self):
self._get_available_networks(prv_nets=self.nets2, pub_nets=[])
def test_get_available_networks_all_public(self):
self._get_available_networks(prv_nets=[], pub_nets=self.nets2)
def test_get_available_networks_private_and_public(self):
self._get_available_networks(prv_nets=self.nets1, pub_nets=self.nets4)
def test_get_available_networks_with_network_ids(self):
prv_nets = [self.nets3[0]]
pub_nets = [self.nets3[-1]]
# specify only first and last network
req_ids = [net['id'] for net in (self.nets3[0], self.nets3[-1])]
self._get_available_networks(prv_nets, pub_nets, req_ids)
def test_get_available_networks_with_externalnet_fails(self):
req_ids = [net['id'] for net in self.nets5]
self.assertRaises(
exception.ExternalNetworkAttachForbidden,
self._get_available_networks,
self.nets5, pub_nets=[], req_ids=req_ids)
def test_get_available_networks_with_externalnet_admin_ctx(self):
admin_ctx = context.RequestContext('userid', 'my_tenantid',
is_admin=True)
req_ids = [net['id'] for net in self.nets5]
self._get_available_networks(self.nets5, pub_nets=[],
req_ids=req_ids, context=admin_ctx)
def test_get_floating_ip_pools(self):
api = neutronapi.API()
search_opts = {'router:external': True}
self.moxed_client.list_networks(**search_opts).\
AndReturn({'networks': [self.fip_pool, self.fip_pool_nova]})
self.mox.ReplayAll()
pools = api.get_floating_ip_pools(self.context)
expected = [{'name': self.fip_pool['name']},
{'name': self.fip_pool_nova['name']}]
self.assertEqual(expected, pools)
def _get_expected_fip_model(self, fip_data, idx=0):
expected = {'id': fip_data['id'],
'address': fip_data['floating_ip_address'],
'pool': self.fip_pool['name'],
'project_id': fip_data['tenant_id'],
'fixed_ip_id': fip_data['port_id'],
'fixed_ip':
{'address': fip_data['fixed_ip_address']},
'instance': ({'uuid': self.port_data2[idx]['device_id']}
if fip_data['port_id']
else None)}
return expected
def _test_get_floating_ip(self, fip_data, idx=0, by_address=False):
api = neutronapi.API()
fip_id = fip_data['id']
net_id = fip_data['floating_network_id']
address = fip_data['floating_ip_address']
if by_address:
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [fip_data]})
else:
self.moxed_client.show_floatingip(fip_id).\
AndReturn({'floatingip': fip_data})
self.moxed_client.show_network(net_id).\
AndReturn({'network': self.fip_pool})
if fip_data['port_id']:
self.moxed_client.show_port(fip_data['port_id']).\
AndReturn({'port': self.port_data2[idx]})
self.mox.ReplayAll()
expected = self._get_expected_fip_model(fip_data, idx)
if by_address:
fip = api.get_floating_ip_by_address(self.context, address)
else:
fip = api.get_floating_ip(self.context, fip_id)
self.assertEqual(expected, fip)
def test_get_floating_ip_unassociated(self):
self._test_get_floating_ip(self.fip_unassociated, idx=0)
def test_get_floating_ip_associated(self):
self._test_get_floating_ip(self.fip_associated, idx=1)
def test_get_floating_ip_by_address(self):
self._test_get_floating_ip(self.fip_unassociated, idx=0,
by_address=True)
def test_get_floating_ip_by_address_associated(self):
self._test_get_floating_ip(self.fip_associated, idx=1,
by_address=True)
def test_get_floating_ip_by_address_not_found(self):
api = neutronapi.API()
address = self.fip_unassociated['floating_ip_address']
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': []})
self.mox.ReplayAll()
self.assertRaises(exception.FloatingIpNotFoundForAddress,
api.get_floating_ip_by_address,
self.context, address)
def test_get_floating_ip_by_id_not_found(self):
api = neutronapi.API()
NeutronNotFound = neutronv2.exceptions.NeutronClientException(
status_code=404)
floating_ip_id = self.fip_unassociated['id']
self.moxed_client.show_floatingip(floating_ip_id).\
AndRaise(NeutronNotFound)
self.mox.ReplayAll()
self.assertRaises(exception.FloatingIpNotFound,
api.get_floating_ip,
self.context, floating_ip_id)
def test_get_floating_ip_raises_non404(self):
api = neutronapi.API()
NeutronNotFound = neutronv2.exceptions.NeutronClientException(
status_code=0)
floating_ip_id = self.fip_unassociated['id']
self.moxed_client.show_floatingip(floating_ip_id).\
AndRaise(NeutronNotFound)
self.mox.ReplayAll()
self.assertRaises(neutronv2.exceptions.NeutronClientException,
api.get_floating_ip,
self.context, floating_ip_id)
def test_get_floating_ip_by_address_multiple_found(self):
api = neutronapi.API()
address = self.fip_unassociated['floating_ip_address']
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_unassociated] * 2})
self.mox.ReplayAll()
self.assertRaises(exception.FloatingIpMultipleFoundForAddress,
api.get_floating_ip_by_address,
self.context, address)
def test_get_floating_ips_by_project(self):
api = neutronapi.API()
project_id = self.context.project_id
self.moxed_client.list_floatingips(tenant_id=project_id).\
AndReturn({'floatingips': [self.fip_unassociated,
self.fip_associated]})
search_opts = {'router:external': True}
self.moxed_client.list_networks(**search_opts).\
AndReturn({'networks': [self.fip_pool, self.fip_pool_nova]})
self.moxed_client.list_ports(tenant_id=project_id).\
AndReturn({'ports': self.port_data2})
self.mox.ReplayAll()
expected = [self._get_expected_fip_model(self.fip_unassociated),
self._get_expected_fip_model(self.fip_associated, idx=1)]
fips = api.get_floating_ips_by_project(self.context)
self.assertEqual(expected, fips)
def _test_get_instance_id_by_floating_address(self, fip_data,
associated=False):
api = neutronapi.API()
address = fip_data['floating_ip_address']
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [fip_data]})
if associated:
self.moxed_client.show_port(fip_data['port_id']).\
AndReturn({'port': self.port_data2[1]})
self.mox.ReplayAll()
if associated:
expected = self.port_data2[1]['device_id']
else:
expected = None
fip = api.get_instance_id_by_floating_address(self.context, address)
self.assertEqual(expected, fip)
def test_get_instance_id_by_floating_address(self):
self._test_get_instance_id_by_floating_address(self.fip_unassociated)
def test_get_instance_id_by_floating_address_associated(self):
self._test_get_instance_id_by_floating_address(self.fip_associated,
associated=True)
def test_allocate_floating_ip(self):
api = neutronapi.API()
pool_name = self.fip_pool['name']
pool_id = self.fip_pool['id']
search_opts = {'router:external': True,
'fields': 'id',
'name': pool_name}
self.moxed_client.list_networks(**search_opts).\
AndReturn({'networks': [self.fip_pool]})
self.moxed_client.create_floatingip(
{'floatingip': {'floating_network_id': pool_id}}).\
AndReturn({'floatingip': self.fip_unassociated})
self.mox.ReplayAll()
fip = api.allocate_floating_ip(self.context, 'ext_net')
self.assertEqual(fip, self.fip_unassociated['floating_ip_address'])
def test_allocate_floating_ip_addr_gen_fail(self):
api = neutronapi.API()
pool_name = self.fip_pool['name']
pool_id = self.fip_pool['id']
search_opts = {'router:external': True,
'fields': 'id',
'name': pool_name}
self.moxed_client.list_networks(**search_opts).\
AndReturn({'networks': [self.fip_pool]})
self.moxed_client.create_floatingip(
{'floatingip': {'floating_network_id': pool_id}}).\
AndRaise(exceptions.IpAddressGenerationFailureClient)
self.mox.ReplayAll()
self.assertRaises(exception.NoMoreFloatingIps,
api.allocate_floating_ip, self.context, 'ext_net')
def test_allocate_floating_ip_exhausted_fail(self):
api = neutronapi.API()
pool_name = self.fip_pool['name']
pool_id = self.fip_pool['id']
search_opts = {'router:external': True,
'fields': 'id',
'name': pool_name}
self.moxed_client.list_networks(**search_opts).\
AndReturn({'networks': [self.fip_pool]})
self.moxed_client.create_floatingip(
{'floatingip': {'floating_network_id': pool_id}}).\
AndRaise(exceptions.ExternalIpAddressExhaustedClient)
self.mox.ReplayAll()
self.assertRaises(exception.NoMoreFloatingIps,
api.allocate_floating_ip, self.context, 'ext_net')
def test_allocate_floating_ip_with_pool_id(self):
api = neutronapi.API()
pool_id = self.fip_pool['id']
search_opts = {'router:external': True,
'fields': 'id',
'id': pool_id}
self.moxed_client.list_networks(**search_opts).\
AndReturn({'networks': [self.fip_pool]})
self.moxed_client.create_floatingip(
{'floatingip': {'floating_network_id': pool_id}}).\
AndReturn({'floatingip': self.fip_unassociated})
self.mox.ReplayAll()
fip = api.allocate_floating_ip(self.context, pool_id)
self.assertEqual(fip, self.fip_unassociated['floating_ip_address'])
def test_allocate_floating_ip_with_default_pool(self):
api = neutronapi.API()
pool_name = self.fip_pool_nova['name']
pool_id = self.fip_pool_nova['id']
search_opts = {'router:external': True,
'fields': 'id',
'name': pool_name}
self.moxed_client.list_networks(**search_opts).\
AndReturn({'networks': [self.fip_pool_nova]})
self.moxed_client.create_floatingip(
{'floatingip': {'floating_network_id': pool_id}}).\
AndReturn({'floatingip': self.fip_unassociated})
self.mox.ReplayAll()
fip = api.allocate_floating_ip(self.context)
self.assertEqual(fip, self.fip_unassociated['floating_ip_address'])
def test_release_floating_ip(self):
api = neutronapi.API()
address = self.fip_unassociated['floating_ip_address']
fip_id = self.fip_unassociated['id']
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_unassociated]})
self.moxed_client.delete_floatingip(fip_id)
self.mox.ReplayAll()
api.release_floating_ip(self.context, address)
def test_disassociate_and_release_floating_ip(self):
api = neutronapi.API()
address = self.fip_unassociated['floating_ip_address']
fip_id = self.fip_unassociated['id']
floating_ip = {'address': address}
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_unassociated]})
self.moxed_client.delete_floatingip(fip_id)
self.mox.ReplayAll()
api.disassociate_and_release_floating_ip(self.context, None,
floating_ip)
def test_release_floating_ip_associated(self):
api = neutronapi.API()
address = self.fip_associated['floating_ip_address']
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_associated]})
self.mox.ReplayAll()
self.assertRaises(exception.FloatingIpAssociated,
api.release_floating_ip, self.context, address)
def _setup_mock_for_refresh_cache(self, api, instances):
nw_info = self.mox.CreateMock(model.NetworkInfo)
self.mox.StubOutWithMock(api, '_get_instance_nw_info')
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
for instance in instances:
nw_info.json()
api._get_instance_nw_info(mox.IgnoreArg(), instance).\
AndReturn(nw_info)
api.db.instance_info_cache_update(mox.IgnoreArg(),
instance['uuid'],
mox.IgnoreArg())
def test_associate_floating_ip(self):
api = neutronapi.API()
address = self.fip_unassociated['floating_ip_address']
fixed_address = self.port_address2
fip_id = self.fip_unassociated['id']
search_opts = {'device_owner': 'compute:nova',
'device_id': self.instance['uuid']}
self.moxed_client.list_ports(**search_opts).\
AndReturn({'ports': [self.port_data2[1]]})
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_unassociated]})
self.moxed_client.update_floatingip(
fip_id, {'floatingip': {'port_id': self.fip_associated['port_id'],
'fixed_ip_address': fixed_address}})
self._setup_mock_for_refresh_cache(api, [self.instance])
self.mox.ReplayAll()
api.associate_floating_ip(self.context, self.instance,
address, fixed_address)
def test_reassociate_floating_ip(self):
api = neutronapi.API()
address = self.fip_associated['floating_ip_address']
new_fixed_address = self.port_address
fip_id = self.fip_associated['id']
search_opts = {'device_owner': 'compute:nova',
'device_id': self.instance2['uuid']}
self.moxed_client.list_ports(**search_opts).\
AndReturn({'ports': [self.port_data2[0]]})
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_associated]})
self.moxed_client.update_floatingip(
fip_id, {'floatingip': {'port_id': 'my_portid1',
'fixed_ip_address': new_fixed_address}})
self.moxed_client.show_port(self.fip_associated['port_id']).\
AndReturn({'port': self.port_data2[1]})
self.mox.StubOutWithMock(api.db, 'instance_get_by_uuid')
api.db.instance_get_by_uuid(mox.IgnoreArg(),
self.instance['uuid']).\
AndReturn(self.instance)
self._setup_mock_for_refresh_cache(api, [self.instance,
self.instance2])
self.mox.ReplayAll()
api.associate_floating_ip(self.context, self.instance2,
address, new_fixed_address)
def test_associate_floating_ip_not_found_fixed_ip(self):
api = neutronapi.API()
address = self.fip_associated['floating_ip_address']
fixed_address = self.fip_associated['fixed_ip_address']
search_opts = {'device_owner': 'compute:nova',
'device_id': self.instance['uuid']}
self.moxed_client.list_ports(**search_opts).\
AndReturn({'ports': [self.port_data2[0]]})
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpNotFoundForAddress,
api.associate_floating_ip, self.context,
self.instance, address, fixed_address)
def test_disassociate_floating_ip(self):
api = neutronapi.API()
address = self.fip_associated['floating_ip_address']
fip_id = self.fip_associated['id']
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_associated]})
self.moxed_client.update_floatingip(
fip_id, {'floatingip': {'port_id': None}})
self._setup_mock_for_refresh_cache(api, [self.instance])
self.mox.ReplayAll()
api.disassociate_floating_ip(self.context, self.instance, address)
def test_add_fixed_ip_to_instance(self):
api = neutronapi.API()
self._setup_mock_for_refresh_cache(api, [self.instance])
network_id = 'my_netid1'
search_opts = {'network_id': network_id}
self.moxed_client.list_subnets(
**search_opts).AndReturn({'subnets': self.subnet_data_n})
search_opts = {'device_id': self.instance['uuid'],
'device_owner': 'compute:nova',
'network_id': network_id}
self.moxed_client.list_ports(
**search_opts).AndReturn({'ports': self.port_data1})
port_req_body = {
'port': {
'fixed_ips': [{'subnet_id': 'my_subid1'},
{'subnet_id': 'my_subid1'}],
},
}
port = self.port_data1[0]
port['fixed_ips'] = [{'subnet_id': 'my_subid1'}]
self.moxed_client.update_port('my_portid1',
MyComparator(port_req_body)).AndReturn({'port': port})
self.mox.ReplayAll()
api.add_fixed_ip_to_instance(self.context, self.instance, network_id)
def test_remove_fixed_ip_from_instance(self):
api = neutronapi.API()
self._setup_mock_for_refresh_cache(api, [self.instance])
address = '10.0.0.3'
zone = 'compute:%s' % self.instance['availability_zone']
search_opts = {'device_id': self.instance['uuid'],
'device_owner': zone,
'fixed_ips': 'ip_address=%s' % address}
self.moxed_client.list_ports(
**search_opts).AndReturn({'ports': self.port_data1})
port_req_body = {
'port': {
'fixed_ips': [],
},
}
port = self.port_data1[0]
port['fixed_ips'] = []
self.moxed_client.update_port('my_portid1',
MyComparator(port_req_body)).AndReturn({'port': port})
self.mox.ReplayAll()
api.remove_fixed_ip_from_instance(self.context, self.instance, address)
def test_list_floating_ips_without_l3_support(self):
api = neutronapi.API()
NeutronNotFound = exceptions.NeutronClientException(
status_code=404)
self.moxed_client.list_floatingips(
fixed_ip_address='1.1.1.1', port_id=1).AndRaise(NeutronNotFound)
self.mox.ReplayAll()
neutronv2.get_client('fake')
floatingips = api._get_floating_ips_by_fixed_and_port(
self.moxed_client, '1.1.1.1', 1)
self.assertEqual(floatingips, [])
def test_nw_info_get_ips(self):
fake_port = {
'fixed_ips': [
{'ip_address': '1.1.1.1'}],
'id': 'port-id',
}
api = neutronapi.API()
self.mox.StubOutWithMock(api, '_get_floating_ips_by_fixed_and_port')
api._get_floating_ips_by_fixed_and_port(
self.moxed_client, '1.1.1.1', 'port-id').AndReturn(
[{'floating_ip_address': '10.0.0.1'}])
self.mox.ReplayAll()
neutronv2.get_client('fake')
result = api._nw_info_get_ips(self.moxed_client, fake_port)
self.assertEqual(len(result), 1)
self.assertEqual(result[0]['address'], '1.1.1.1')
self.assertEqual(result[0]['floating_ips'][0]['address'], '10.0.0.1')
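    # Illustrative sketch (an assumption mirroring the assertions above):
    # each element of the result behaves like a dict carrying the fixed
    # address plus the floating IPs attached to it, roughly:
    #
    #     [{'address': '1.1.1.1',
    #       'floating_ips': [{'address': '10.0.0.1'}]}]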
def test_nw_info_get_subnets(self):
fake_port = {
'fixed_ips': [
{'ip_address': '1.1.1.1'},
{'ip_address': '2.2.2.2'}],
'id': 'port-id',
}
fake_subnet = model.Subnet(cidr='1.0.0.0/8')
fake_ips = [model.IP(x['ip_address']) for x in fake_port['fixed_ips']]
api = neutronapi.API()
self.mox.StubOutWithMock(api, '_get_subnets_from_port')
api._get_subnets_from_port(self.context, fake_port).AndReturn(
[fake_subnet])
self.mox.ReplayAll()
neutronv2.get_client('fake')
subnets = api._nw_info_get_subnets(self.context, fake_port, fake_ips)
self.assertEqual(len(subnets), 1)
self.assertEqual(len(subnets[0]['ips']), 1)
self.assertEqual(subnets[0]['ips'][0]['address'], '1.1.1.1')
def _test_nw_info_build_network(self, vif_type):
fake_port = {
'fixed_ips': [{'ip_address': '1.1.1.1'}],
'id': 'port-id',
'network_id': 'net-id',
'binding:vif_type': vif_type,
}
fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
fake_nets = [{'id': 'net-id', 'name': 'foo', 'tenant_id': 'tenant'}]
api = neutronapi.API()
self.mox.ReplayAll()
neutronv2.get_client('fake')
net, iid = api._nw_info_build_network(fake_port, fake_nets,
fake_subnets)
self.assertEqual(net['subnets'], fake_subnets)
self.assertEqual(net['id'], 'net-id')
self.assertEqual(net['label'], 'foo')
self.assertEqual(net.get_meta('tenant_id'), 'tenant')
self.assertEqual(net.get_meta('injected'), CONF.flat_injected)
return net, iid
def test_nw_info_build_network_ovs(self):
net, iid = self._test_nw_info_build_network(model.VIF_TYPE_OVS)
self.assertEqual(net['bridge'], CONF.neutron_ovs_bridge)
self.assertNotIn('should_create_bridge', net)
self.assertEqual(iid, 'port-id')
def test_nw_info_build_network_bridge(self):
net, iid = self._test_nw_info_build_network(model.VIF_TYPE_BRIDGE)
self.assertEqual(net['bridge'], 'brqnet-id')
self.assertTrue(net['should_create_bridge'])
self.assertIsNone(iid)
def test_nw_info_build_network_other(self):
net, iid = self._test_nw_info_build_network(None)
self.assertIsNone(net['bridge'])
self.assertNotIn('should_create_bridge', net)
self.assertIsNone(iid)
def test_nw_info_build_no_match(self):
fake_port = {
'fixed_ips': [{'ip_address': '1.1.1.1'}],
'id': 'port-id',
'network_id': 'net-id1',
'tenant_id': 'tenant',
'binding:vif_type': model.VIF_TYPE_OVS,
}
fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
fake_nets = [{'id': 'net-id2', 'name': 'foo', 'tenant_id': 'tenant'}]
api = neutronapi.API()
self.mox.ReplayAll()
neutronv2.get_client('fake')
net, iid = api._nw_info_build_network(fake_port, fake_nets,
fake_subnets)
self.assertEqual(fake_subnets, net['subnets'])
        self.assertEqual('net-id1', net['id'])
self.assertEqual('tenant', net['meta']['tenant_id'])
def test_build_network_info_model(self):
api = neutronapi.API()
fake_inst = {'project_id': 'fake', 'uuid': 'uuid',
'info_cache': {'network_info': []}}
fake_ports = [
# admin_state_up=True and status='ACTIVE' thus vif.active=True
{'id': 'port1',
'network_id': 'net-id',
'admin_state_up': True,
'status': 'ACTIVE',
'fixed_ips': [{'ip_address': '1.1.1.1'}],
'mac_address': 'de:ad:be:ef:00:01',
'binding:vif_type': model.VIF_TYPE_BRIDGE,
},
# admin_state_up=False and status='DOWN' thus vif.active=True
{'id': 'port2',
'network_id': 'net-id',
'admin_state_up': False,
'status': 'DOWN',
'fixed_ips': [{'ip_address': '1.1.1.1'}],
'mac_address': 'de:ad:be:ef:00:02',
'binding:vif_type': model.VIF_TYPE_BRIDGE,
},
# admin_state_up=True and status='DOWN' thus vif.active=False
{'id': 'port0',
'network_id': 'net-id',
'admin_state_up': True,
'status': 'DOWN',
'fixed_ips': [{'ip_address': '1.1.1.1'}],
'mac_address': 'de:ad:be:ef:00:03',
'binding:vif_type': model.VIF_TYPE_BRIDGE,
},
# This does not match the networks we provide below,
# so it should be ignored (and is here to verify that)
{'id': 'port3',
'network_id': 'other-net-id',
'admin_state_up': True,
'status': 'DOWN',
},
]
fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
fake_nets = [
{'id': 'net-id',
'name': 'foo',
'tenant_id': 'fake',
}
]
neutronv2.get_client(mox.IgnoreArg(), admin=True).MultipleTimes(
).AndReturn(self.moxed_client)
self.moxed_client.list_ports(
tenant_id='fake', device_id='uuid').AndReturn(
{'ports': fake_ports})
self.mox.StubOutWithMock(api, '_get_floating_ips_by_fixed_and_port')
self.mox.StubOutWithMock(api, '_get_subnets_from_port')
requested_ports = [fake_ports[2], fake_ports[0], fake_ports[1]]
for requested_port in requested_ports:
api._get_floating_ips_by_fixed_and_port(
self.moxed_client, '1.1.1.1', requested_port['id']).AndReturn(
[{'floating_ip_address': '10.0.0.1'}])
for requested_port in requested_ports:
api._get_subnets_from_port(self.context, requested_port
).AndReturn(fake_subnets)
self.mox.ReplayAll()
neutronv2.get_client('fake')
nw_infos = api._build_network_info_model(self.context, fake_inst,
fake_nets,
[fake_ports[2]['id'],
fake_ports[0]['id'],
fake_ports[1]['id']])
self.assertEqual(len(nw_infos), 3)
index = 0
for nw_info in nw_infos:
self.assertEqual(nw_info['address'],
requested_ports[index]['mac_address'])
self.assertEqual(nw_info['devname'], 'tapport' + str(index))
self.assertIsNone(nw_info['ovs_interfaceid'])
self.assertEqual(nw_info['type'], model.VIF_TYPE_BRIDGE)
self.assertEqual(nw_info['network']['bridge'], 'brqnet-id')
index += 1
self.assertEqual(nw_infos[0]['active'], False)
self.assertEqual(nw_infos[1]['active'], True)
self.assertEqual(nw_infos[2]['active'], True)
self.assertEqual(nw_infos[0]['id'], 'port0')
self.assertEqual(nw_infos[1]['id'], 'port1')
self.assertEqual(nw_infos[2]['id'], 'port2')
def test_get_all_empty_list_networks(self):
api = neutronapi.API()
self.moxed_client.list_networks().AndReturn({'networks': []})
self.mox.ReplayAll()
networks = api.get_all(self.context)
self.assertEqual(networks, [])
def test_get_floating_ips_by_fixed_address(self):
# NOTE(lbragstad): We need to reset the mocks in order to assert
# a NotImplementedError is raised when calling the method under test.
self.mox.ResetAll()
fake_fixed = '192.168.1.4'
api = neutronapi.API()
self.assertRaises(NotImplementedError,
api.get_floating_ips_by_fixed_address,
self.context, fake_fixed)
class TestNeutronv2WithMock(test.TestCase):
"""Used to test Neutron V2 API with mock."""
def setUp(self):
super(TestNeutronv2WithMock, self).setUp()
self.api = neutronapi.API()
self.context = context.RequestContext(
'fake-user', 'fake-project',
auth_token='bff4a5a6b9eb4ea2a6efec6eefb77936')
@mock.patch('nova.openstack.common.lockutils.lock')
def test_get_instance_nw_info_locks_per_instance(self, mock_lock):
instance = instance_obj.Instance(uuid=uuid.uuid4())
api = neutronapi.API()
mock_lock.side_effect = test.TestingException
self.assertRaises(test.TestingException,
api.get_instance_nw_info, 'context', instance)
mock_lock.assert_called_once_with('refresh_cache-%s' % instance.uuid)
class TestNeutronv2ModuleMethods(test.TestCase):
def test_gather_port_ids_and_networks_wrong_params(self):
api = neutronapi.API()
# Test with networks not None and port_ids is None
self.assertRaises(exception.NovaException,
api._gather_port_ids_and_networks,
'fake_context', 'fake_instance',
[{'network': {'name': 'foo'}}], None)
# Test with networks is None and port_ids not None
self.assertRaises(exception.NovaException,
api._gather_port_ids_and_networks,
'fake_context', 'fake_instance',
None, ['list', 'of', 'port_ids'])
def test_ensure_requested_network_ordering_no_preference_ids(self):
        ids = [1, 2, 3]
        neutronapi._ensure_requested_network_ordering(
            lambda x: x,
            ids,
            None)
def test_ensure_requested_network_ordering_no_preference_hashes(self):
        networks = [{'id': 3}, {'id': 1}, {'id': 2}]
        neutronapi._ensure_requested_network_ordering(
            lambda x: x['id'],
            networks,
            None)
        self.assertEqual(networks, [{'id': 3}, {'id': 1}, {'id': 2}])
def test_ensure_requested_network_ordering_with_preference(self):
        networks = [{'id': 3}, {'id': 1}, {'id': 2}]
        neutronapi._ensure_requested_network_ordering(
            lambda x: x['id'],
            networks,
            [1, 2, 3])
        self.assertEqual(networks, [{'id': 1}, {'id': 2}, {'id': 3}])
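    # Worked example (added for clarity): given the accessor lambda x: x['id'],
    # _ensure_requested_network_ordering reorders the list in place to match
    # the preference list:
    #
    #     networks = [{'id': 3}, {'id': 1}, {'id': 2}]
    #     neutronapi._ensure_requested_network_ordering(
    #         lambda x: x['id'], networks, [1, 2, 3])
    #     # networks is now [{'id': 1}, {'id': 2}, {'id': 3}]
    #
    # and leaves the list untouched when the preference list is None.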
class TestNeutronv2Portbinding(TestNeutronv2Base):
def test_allocate_for_instance_portbinding(self):
self._allocate_for_instance(1, portbinding=True)
def test_populate_neutron_extension_values_binding(self):
api = neutronapi.API()
neutronv2.get_client(mox.IgnoreArg()).AndReturn(
self.moxed_client)
self.moxed_client.list_extensions().AndReturn(
{'extensions': [{'name': constants.PORTBINDING_EXT}]})
self.mox.ReplayAll()
host_id = 'my_host_id'
instance = {'host': host_id}
port_req_body = {'port': {}}
api._populate_neutron_extension_values(self.context, instance,
port_req_body)
self.assertEqual(port_req_body['port']['binding:host_id'], host_id)
def test_migrate_instance_finish_binding_false(self):
api = neutronapi.API()
self.mox.StubOutWithMock(api, '_has_port_binding_extension')
api._has_port_binding_extension(mox.IgnoreArg(),
refresh_cache=True).AndReturn(False)
self.mox.ReplayAll()
api.migrate_instance_finish(self.context, None, None)
def test_migrate_instance_finish_binding_true(self):
api = neutronapi.API()
self.mox.StubOutWithMock(api, '_has_port_binding_extension')
api._has_port_binding_extension(mox.IgnoreArg(),
refresh_cache=True).AndReturn(True)
neutronv2.get_client(mox.IgnoreArg(), admin=True).AndReturn(
self.moxed_client)
search_opts = {'device_id': self.instance['uuid'],
'tenant_id': self.instance['project_id']}
ports = {'ports': [{'id': 'test1'}]}
self.moxed_client.list_ports(**search_opts).AndReturn(ports)
migration = {'source_compute': self.instance.get('host'),
'dest_compute': 'dest_host', }
port_req_body = {'port':
{'binding:host_id': migration['dest_compute']}}
self.moxed_client.update_port('test1',
port_req_body).AndReturn(None)
self.mox.ReplayAll()
api.migrate_instance_finish(self.context, self.instance, migration)
def test_migrate_instance_finish_binding_true_exception(self):
api = neutronapi.API()
self.mox.StubOutWithMock(api, '_has_port_binding_extension')
api._has_port_binding_extension(mox.IgnoreArg(),
refresh_cache=True).AndReturn(True)
neutronv2.get_client(mox.IgnoreArg(), admin=True).AndReturn(
self.moxed_client)
search_opts = {'device_id': self.instance['uuid'],
'tenant_id': self.instance['project_id']}
ports = {'ports': [{'id': 'test1'}]}
self.moxed_client.list_ports(**search_opts).AndReturn(ports)
migration = {'source_compute': self.instance.get('host'),
'dest_compute': 'dest_host', }
port_req_body = {'port':
{'binding:host_id': migration['dest_compute']}}
self.moxed_client.update_port('test1',
port_req_body).AndRaise(
Exception("fail to update port"))
self.mox.ReplayAll()
self.assertRaises(NEUTRON_CLIENT_EXCEPTION,
api.migrate_instance_finish,
self.context, self.instance, migration)
def test_associate_not_implemented(self):
api = neutronapi.API()
self.assertRaises(NotImplementedError,
api.associate,
self.context, 'id')
class TestNeutronv2ExtraDhcpOpts(TestNeutronv2Base):
def setUp(self):
super(TestNeutronv2ExtraDhcpOpts, self).setUp()
neutronv2.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
self.moxed_client)
def test_allocate_for_instance_1_with_extra_dhcp_opts_turned_off(self):
self._allocate_for_instance(1, extra_dhcp_opts=False)
def test_allocate_for_instance_extradhcpopts(self):
dhcp_opts = [{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'}]
self._allocate_for_instance(1, dhcp_options=dhcp_opts)
class TestNeutronClientForAdminScenarios(test.TestCase):
def test_get_cached_neutron_client_for_admin(self):
self.flags(neutron_url='http://anyhost/')
self.flags(neutron_url_timeout=30)
my_context = context.RequestContext('userid',
'my_tenantid',
auth_token='token')
# Make multiple calls and ensure we get the same
# client back again and again
client = neutronv2.get_client(my_context, True)
client2 = neutronv2.get_client(my_context, True)
client3 = neutronv2.get_client(my_context, True)
self.assertEqual(client, client2)
self.assertEqual(client, client3)
# clear the cache
local.strong_store.neutron_client = None
# A new client should be created now
client4 = neutronv2.get_client(my_context, True)
self.assertNotEqual(client, client4)
def test_get_neutron_client_for_non_admin(self):
self.flags(neutron_url='http://anyhost/')
self.flags(neutron_url_timeout=30)
my_context = context.RequestContext('userid',
'my_tenantid',
auth_token='token')
# Multiple calls should return different clients
client = neutronv2.get_client(my_context)
client2 = neutronv2.get_client(my_context)
self.assertNotEqual(client, client2)
def test_get_neutron_client_for_non_admin_and_no_token(self):
self.flags(neutron_url='http://anyhost/')
self.flags(neutron_url_timeout=30)
my_context = context.RequestContext('userid',
'my_tenantid')
self.assertRaises(exceptions.Unauthorized,
neutronv2.get_client,
my_context)
def _test_get_client_for_admin(self, use_id=False, admin_context=False):
self.flags(neutron_auth_strategy=None)
self.flags(neutron_url='http://anyhost/')
self.flags(neutron_url_timeout=30)
if use_id:
self.flags(neutron_admin_tenant_id='admin_tenant_id')
if admin_context:
my_context = context.get_admin_context()
else:
my_context = context.RequestContext('userid', 'my_tenantid',
auth_token='token')
self.mox.StubOutWithMock(client.Client, "__init__")
kwargs = {
'auth_url': CONF.neutron_admin_auth_url,
'password': CONF.neutron_admin_password,
'username': CONF.neutron_admin_username,
'endpoint_url': CONF.neutron_url,
'auth_strategy': None,
'timeout': CONF.neutron_url_timeout,
'insecure': False,
'ca_cert': None}
if use_id:
kwargs['tenant_id'] = CONF.neutron_admin_tenant_id
else:
kwargs['tenant_name'] = CONF.neutron_admin_tenant_name
client.Client.__init__(**kwargs).AndReturn(None)
self.mox.ReplayAll()
# clear the cache
if hasattr(local.strong_store, 'neutron_client'):
delattr(local.strong_store, 'neutron_client')
if admin_context:
# Note that the context does not contain a token but is
# an admin context which will force an elevation to admin
# credentials.
neutronv2.get_client(my_context)
else:
# Note that the context is not elevated, but the True is passed in
# which will force an elevation to admin credentials even though
# the context has an auth_token.
neutronv2.get_client(my_context, True)
def test_get_client_for_admin(self):
self._test_get_client_for_admin()
def test_get_client_for_admin_with_id(self):
self._test_get_client_for_admin(use_id=True)
def test_get_client_for_admin_context(self):
self._test_get_client_for_admin(admin_context=True)
def test_get_client_for_admin_context_with_id(self):
self._test_get_client_for_admin(use_id=True, admin_context=True)
| apache-2.0 |
bkahlert/seqan-research | raw/pmbs12/pmsb13-data-20120615/trunk/misc/seqan_instrumentation/bin/classes/simplejson/tests/test_indent.py | 78 | 2570 | from unittest import TestCase
import simplejson as json
import textwrap
from StringIO import StringIO
class TestIndent(TestCase):
def test_indent(self):
h = [['blorpie'], ['whoops'], [], 'd-shtaeou', 'd-nthiouh',
'i-vhbjkhnth',
{'nifty': 87}, {'field': 'yes', 'morefield': False} ]
expect = textwrap.dedent("""\
[
\t[
\t\t"blorpie"
\t],
\t[
\t\t"whoops"
\t],
\t[],
\t"d-shtaeou",
\t"d-nthiouh",
\t"i-vhbjkhnth",
\t{
\t\t"nifty": 87
\t},
\t{
\t\t"field": "yes",
\t\t"morefield": false
\t}
]""")
d1 = json.dumps(h)
d2 = json.dumps(h, indent='\t', sort_keys=True, separators=(',', ': '))
        d3 = json.dumps(h, indent='  ', sort_keys=True, separators=(',', ': '))
d4 = json.dumps(h, indent=2, sort_keys=True, separators=(',', ': '))
h1 = json.loads(d1)
h2 = json.loads(d2)
h3 = json.loads(d3)
h4 = json.loads(d4)
self.assertEquals(h1, h)
self.assertEquals(h2, h)
self.assertEquals(h3, h)
self.assertEquals(h4, h)
        self.assertEquals(d3, expect.replace('\t', '  '))
        self.assertEquals(d4, expect.replace('\t', '  '))
# NOTE: Python 2.4 textwrap.dedent converts tabs to spaces,
# so the following is expected to fail. Python 2.4 is not a
# supported platform in simplejson 2.1.0+.
self.assertEquals(d2, expect)
def test_indent0(self):
h = {3: 1}
def check(indent, expected):
d1 = json.dumps(h, indent=indent)
self.assertEquals(d1, expected)
sio = StringIO()
json.dump(h, sio, indent=indent)
self.assertEquals(sio.getvalue(), expected)
# indent=0 should emit newlines
check(0, '{\n"3": 1\n}')
# indent=None is more compact
check(None, '{"3": 1}')
def test_separators(self):
lst = [1,2,3,4]
expect = '[\n1,\n2,\n3,\n4\n]'
expect_spaces = '[\n1, \n2, \n3, \n4\n]'
# Ensure that separators still works
self.assertEquals(
expect_spaces,
json.dumps(lst, indent=0, separators=(', ', ': ')))
# Force the new defaults
self.assertEquals(
expect,
json.dumps(lst, indent=0, separators=(',', ': ')))
# Added in 2.1.4
self.assertEquals(
expect,
json.dumps(lst, indent=0)) | mit |
smartforceplus/SmartForceplus | addons/website_partner/__openerp__.py | 383 | 1498 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Website Partner',
'category': 'Website',
'summary': 'Partner Module for Website',
'version': '0.1',
'description': """Base module holding website-related stuff for partner model""",
'author': 'OpenERP SA',
'depends': ['website'],
'data': [
'views/res_partner_view.xml',
'views/website_partner_view.xml',
'data/website_data.xml',
],
'demo': ['data/demo.xml'],
'qweb': [
],
'installable': True,
'auto_install': False,
}
| agpl-3.0 |
unsiloai/syntaxnet-ops-hack | tensorflow/python/ops/logging_ops.py | 64 | 14358 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logging and Summary Operations."""
# pylint: disable=protected-access
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_logging_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_logging_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util.deprecation import deprecated
# The python wrapper for Assert is in control_flow_ops, as the Assert
# call relies on certain conditionals for its dependencies. Use
# control_flow_ops.Assert.
# Assert and Print are special symbols in python, so we must
# use an upper-case version of them.
def Print(input_, data, message=None, first_n=None, summarize=None,
name=None):
"""Prints a list of tensors.
This is an identity op with the side effect of printing `data` when
evaluating.
Note: This op prints to the standard error. It is not currently compatible
with jupyter notebook (printing to the notebook *server's* output, not into
the notebook).
Args:
input_: A tensor passed through this op.
data: A list of tensors to print out when op is evaluated.
message: A string, prefix of the error message.
first_n: Only log `first_n` number of times. Negative numbers log always;
this is the default.
summarize: Only print this many entries of each tensor. If None, then a
maximum of 3 elements are printed per input tensor.
name: A name for the operation (optional).
Returns:
Same tensor as `input_`.
"""
return gen_logging_ops._print(input_, data, message, first_n, summarize, name)
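# A minimal usage sketch for Print (the tensor names below are illustrative,
# not part of this module):
#
#   import tensorflow as tf
#   x = tf.constant([1.0, 2.0, 3.0])
#   x = tf.Print(x, [x], message="x is: ", summarize=3)
#   with tf.Session() as sess:
#     sess.run(x)  # evaluating x prints "x is: [1 2 3]" to standard error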
@ops.RegisterGradient("Print")
def _PrintGrad(op, *grad):
return list(grad) + [None] * (len(op.inputs) - 1)
def _Collect(val, collections, default_collections):
if collections is None:
collections = default_collections
for key in collections:
ops.add_to_collection(key, val)
@deprecated(
"2016-11-30", "Please switch to tf.summary.histogram. Note that "
"tf.summary.histogram uses the node name instead of the tag. "
"This means that TensorFlow will automatically de-duplicate summary "
"names based on the scope they are created in.")
def histogram_summary(tag, values, collections=None, name=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with a histogram.
  This op is deprecated. Please switch to tf.summary.histogram.
For an explanation of why this op was deprecated, and information on how to
migrate, look ['here'](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/deprecated/__init__.py)
The generated
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
has one summary value containing a histogram for `values`.
This op reports an `InvalidArgument` error if any value is not finite.
Args:
tag: A `string` `Tensor`. 0-D. Tag to use for the summary value.
values: A real numeric `Tensor`. Any shape. Values to use to
build the histogram.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
with ops.name_scope(name, "HistogramSummary", [tag, values]) as scope:
val = gen_logging_ops._histogram_summary(
tag=tag, values=values, name=scope)
_Collect(val, collections, [ops.GraphKeys.SUMMARIES])
return val
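# A minimal usage sketch for the deprecated histogram_summary (the variable
# and tag names are illustrative):
#
#   weights = tf.Variable(tf.random_normal([10, 10]))
#   hist = histogram_summary("weights", weights)
#   # `hist` is a scalar string tensor holding a serialized Summary proto;
#   # _Collect also adds it to the GraphKeys.SUMMARIES collection.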
@deprecated(
"2016-11-30", "Please switch to tf.summary.image. Note that "
"tf.summary.image uses the node name instead of the tag. "
"This means that TensorFlow will automatically de-duplicate summary "
"names based on the scope they are created in. Also, the max_images "
"argument was renamed to max_outputs.")
def image_summary(tag, tensor, max_images=3, collections=None, name=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with images.
For an explanation of why this op was deprecated, and information on how to
migrate, look ['here'](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/deprecated/__init__.py)
The summary has up to `max_images` summary values containing images. The
images are built from `tensor` which must be 4-D with shape `[batch_size,
height, width, channels]` and where `channels` can be:
* 1: `tensor` is interpreted as Grayscale.
* 3: `tensor` is interpreted as RGB.
* 4: `tensor` is interpreted as RGBA.
The images have the same number of channels as the input tensor. For float
input, the values are normalized one image at a time to fit in the range
`[0, 255]`. `uint8` values are unchanged. The op uses two different
normalization algorithms:
* If the input values are all positive, they are rescaled so the largest one
is 255.
* If any input value is negative, the values are shifted so input value 0.0
is at 127. They are then rescaled so that either the smallest value is 0,
or the largest one is 255.
The `tag` argument is a scalar `Tensor` of type `string`. It is used to
build the `tag` of the summary values:
* If `max_images` is 1, the summary value tag is '*tag*/image'.
* If `max_images` is greater than 1, the summary value tags are
generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.
Args:
tag: A scalar `Tensor` of type `string`. Used to build the `tag`
of the summary values.
tensor: A 4-D `uint8` or `float32` `Tensor` of shape `[batch_size, height,
width, channels]` where `channels` is 1, 3, or 4.
max_images: Max number of batch elements to generate images for.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [ops.GraphKeys.SUMMARIES]
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
with ops.name_scope(name, "ImageSummary", [tag, tensor]) as scope:
val = gen_logging_ops._image_summary(
tag=tag, tensor=tensor, max_images=max_images, name=scope)
_Collect(val, collections, [ops.GraphKeys.SUMMARIES])
return val
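# A minimal usage sketch for the deprecated image_summary (shapes are
# illustrative; float inputs are normalized as described in the docstring):
#
#   images = tf.placeholder(tf.float32, [None, 28, 28, 1])
#   img_summ = image_summary("inputs", images, max_images=3)
#   # With max_images=3, the emitted tags are "inputs/image/0" through
#   # "inputs/image/2".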
@deprecated(
"2016-11-30", "Please switch to tf.summary.audio. Note that "
"tf.summary.audio uses the node name instead of the tag. "
"This means that TensorFlow will automatically de-duplicate summary "
"names based on the scope they are created in.")
def audio_summary(tag,
tensor,
sample_rate,
max_outputs=3,
collections=None,
name=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with audio.
This op is deprecated. Please switch to tf.summary.audio.
For an explanation of why this op was deprecated, and information on how to
migrate, look ['here'](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/deprecated/__init__.py)
The summary has up to `max_outputs` summary values containing audio. The
audio is built from `tensor` which must be 3-D with shape `[batch_size,
frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
assumed to be in the range of `[-1.0, 1.0]` with a sample rate of
`sample_rate`.
The `tag` argument is a scalar `Tensor` of type `string`. It is used to
build the `tag` of the summary values:
* If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
* If `max_outputs` is greater than 1, the summary value tags are
generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
Args:
tag: A scalar `Tensor` of type `string`. Used to build the `tag`
of the summary values.
tensor: A 3-D `float32` `Tensor` of shape `[batch_size, frames, channels]`
or a 2-D `float32` `Tensor` of shape `[batch_size, frames]`.
sample_rate: A Scalar `float32` `Tensor` indicating the sample rate of the
signal in hertz.
max_outputs: Max number of batch elements to generate audio for.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [ops.GraphKeys.SUMMARIES]
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
with ops.name_scope(name, "AudioSummary", [tag, tensor]) as scope:
sample_rate = ops.convert_to_tensor(sample_rate, dtype=dtypes.float32,
name="sample_rate")
val = gen_logging_ops._audio_summary_v2(tag=tag,
tensor=tensor,
max_outputs=max_outputs,
sample_rate=sample_rate,
name=scope)
_Collect(val, collections, [ops.GraphKeys.SUMMARIES])
return val
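# A minimal usage sketch for the deprecated audio_summary (batch shape and
# sample rate are illustrative; values are assumed to lie in [-1.0, 1.0]):
#
#   waveforms = tf.placeholder(tf.float32, [None, 16000])  # [batch, frames]
#   audio_summ = audio_summary("speech", waveforms, sample_rate=16000.0)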
@deprecated("2016-11-30", "Please switch to tf.summary.merge.")
def merge_summary(inputs, collections=None, name=None):
# pylint: disable=line-too-long
"""Merges summaries.
This op is deprecated. Please switch to tf.summary.merge, which has identical
behavior.
This op creates a
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
protocol buffer that contains the union of all the values in the input
summaries.
When the Op is run, it reports an `InvalidArgument` error if multiple values
in the summaries to merge use the same tag.
Args:
inputs: A list of `string` `Tensor` objects containing serialized `Summary`
protocol buffers.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer resulting from the merging.
"""
with ops.name_scope(name, "MergeSummary", inputs):
val = gen_logging_ops._merge_summary(inputs=inputs, name=name)
_Collect(val, collections, [])
return val
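# A minimal sketch of merging individual summaries (assumes `hist` and
# `img_summ` were created as in the sketches above):
#
#   merged = merge_summary([hist, img_summ])
#   # `merged` serializes the union of both summaries' values; merging fails
#   # with an InvalidArgument error at run time if two values share a tag.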
@deprecated("2016-11-30", "Please switch to tf.summary.merge_all.")
def merge_all_summaries(key=ops.GraphKeys.SUMMARIES):
"""Merges all summaries collected in the default graph.
This op is deprecated. Please switch to tf.summary.merge_all, which has
identical behavior.
Args:
key: `GraphKey` used to collect the summaries. Defaults to
`GraphKeys.SUMMARIES`.
Returns:
If no summaries were collected, returns None. Otherwise returns a scalar
`Tensor` of type `string` containing the serialized `Summary` protocol
buffer resulting from the merging.
"""
summary_ops = ops.get_collection(key)
if not summary_ops:
return None
else:
return merge_summary(summary_ops)
def get_summary_op():
"""Returns a single Summary op that would run all summaries.
Either existing one from `SUMMARY_OP` collection or merges all existing
summaries.
Returns:
If no summaries were collected, returns None. Otherwise returns a scalar
`Tensor` of type `string` containing the serialized `Summary` protocol
buffer resulting from the merging.
"""
summary_op = ops.get_collection(ops.GraphKeys.SUMMARY_OP)
if summary_op is not None:
if summary_op:
summary_op = summary_op[0]
else:
summary_op = None
if summary_op is None:
summary_op = merge_all_summaries()
if summary_op is not None:
ops.add_to_collection(ops.GraphKeys.SUMMARY_OP, summary_op)
return summary_op
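# A minimal sketch of get_summary_op's caching behavior (assumes at least one
# summary op has been collected into GraphKeys.SUMMARIES):
#
#   op1 = get_summary_op()  # merges all summaries, caches in SUMMARY_OP
#   op2 = get_summary_op()  # returns the cached op, so `op1 is op2`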
@deprecated(
"2016-11-30", "Please switch to tf.summary.scalar. Note that "
"tf.summary.scalar uses the node name instead of the tag. "
"This means that TensorFlow will automatically de-duplicate summary "
"names based on the scope they are created in. Also, passing a "
"tensor or list of tags to a scalar summary op is no longer "
"supported.")
def scalar_summary(tags, values, collections=None, name=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with scalar values.
  This op is deprecated. Please switch to tf.summary.scalar.
For an explanation of why this op was deprecated, and information on how to
migrate, look ['here'](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/deprecated/__init__.py)
The input `tags` and `values` must have the same shape. The generated
summary has a summary value for each tag-value pair in `tags` and `values`.
Args:
tags: A `string` `Tensor`. Tags for the summaries.
values: A real numeric Tensor. Values for the summaries.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
with ops.name_scope(name, "ScalarSummary", [tags, values]) as scope:
val = gen_logging_ops._scalar_summary(tags=tags, values=values, name=scope)
_Collect(val, collections, [ops.GraphKeys.SUMMARIES])
return val
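# A minimal usage sketch for the deprecated scalar_summary (the loss value is
# illustrative):
#
#   loss = tf.constant(0.25)
#   loss_summ = scalar_summary("loss", loss)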
ops.NotDifferentiable("HistogramSummary")
ops.NotDifferentiable("ImageSummary")
ops.NotDifferentiable("AudioSummary")
ops.NotDifferentiable("AudioSummaryV2")
ops.NotDifferentiable("MergeSummary")
ops.NotDifferentiable("ScalarSummary")
| apache-2.0 |
tedder/ansible | lib/ansible/module_utils/utm_utils.py | 44 | 10446 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
class UTMModuleConfigurationError(Exception):
def __init__(self, msg, **args):
        super(UTMModuleConfigurationError, self).__init__(msg)
self.msg = msg
self.module_fail_args = args
def do_fail(self, module):
module.fail_json(msg=self.msg, other=self.module_fail_args)
class UTMModule(AnsibleModule):
"""
    This is a helper class for constructing any UTM module. It automatically adds the utm host, port, token,
    protocol, validate_certs and state fields to the module. If you want to implement your own Sophos UTM module,
    just initialize this UTMModule class and define the payload fields that are needed for your module.
    See other modules, such as utm_aaa_group, for an example, or the usage sketch after this class.
"""
def __init__(self, argument_spec, bypass_checks=False, no_log=False, check_invalid_arguments=None,
mutually_exclusive=None, required_together=None, required_one_of=None, add_file_common_args=False,
supports_check_mode=False, required_if=None):
default_specs = dict(
headers=dict(type='dict', required=False, default={}),
utm_host=dict(type='str', required=True),
utm_port=dict(type='int', default=4444),
utm_token=dict(type='str', required=True, no_log=True),
utm_protocol=dict(type='str', required=False, default="https", choices=["https", "http"]),
validate_certs=dict(type='bool', required=False, default=True),
state=dict(default='present', choices=['present', 'absent'])
)
super(UTMModule, self).__init__(self._merge_specs(default_specs, argument_spec), bypass_checks, no_log,
check_invalid_arguments, mutually_exclusive, required_together, required_one_of,
add_file_common_args, supports_check_mode, required_if)
def _merge_specs(self, default_specs, custom_specs):
result = default_specs.copy()
result.update(custom_specs)
return result
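# A minimal sketch of how a concrete module would combine UTMModule and UTM
# (the endpoint and payload fields below are illustrative, loosely modeled on
# modules such as utm_aaa_group):
#
#   def main():
#       endpoint = "network/host"
#       key_to_check_for_changes = ["address", "comment"]
#       module = UTMModule(argument_spec=dict(
#           name=dict(type='str', required=True),
#           address=dict(type='str', required=True),
#           comment=dict(type='str', required=False, default=""),
#       ))
#       UTM(module, endpoint, key_to_check_for_changes).execute()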
class UTM:
def __init__(self, module, endpoint, change_relevant_keys, info_only=False):
"""
Initialize UTM Class
:param module: The Ansible module
:param endpoint: The corresponding endpoint to the module
:param change_relevant_keys: The keys of the object to check for changes
:param info_only: When implementing an info module, set this to true. Will allow access to the info method only
"""
self.info_only = info_only
self.module = module
self.request_url = module.params.get('utm_protocol') + "://" + module.params.get('utm_host') + ":" + to_native(
module.params.get('utm_port')) + "/api/objects/" + endpoint + "/"
"""
The change_relevant_keys will be checked for changes to determine whether the object needs to be updated
"""
self.change_relevant_keys = change_relevant_keys
self.module.params['url_username'] = 'token'
self.module.params['url_password'] = module.params.get('utm_token')
        # Every change-relevant key must be present in the module's params,
        # otherwise change detection cannot work.
        if not all(elem in module.params.keys() for elem in self.change_relevant_keys):
            raise UTMModuleConfigurationError(
                "The keys " + to_native(
                    self.change_relevant_keys) + " to check are not in the module's keys:\n" + to_native(
                    module.params.keys()))
def execute(self):
try:
if not self.info_only:
if self.module.params.get('state') == 'present':
self._add()
elif self.module.params.get('state') == 'absent':
self._remove()
else:
self._info()
except Exception as e:
self.module.fail_json(msg=to_native(e))
def _info(self):
"""
returns the info for an object in utm
"""
info, result = self._lookup_entry(self.module, self.request_url)
if info["status"] >= 400:
            self.module.fail_json(result=json.loads(info["body"]))
else:
if result is None:
self.module.exit_json(changed=False)
else:
self.module.exit_json(result=result, changed=False)
def _add(self):
"""
adds or updates a host object on utm
"""
combined_headers = self._combine_headers()
is_changed = False
info, result = self._lookup_entry(self.module, self.request_url)
if info["status"] >= 400:
            self.module.fail_json(result=json.loads(info["body"]))
else:
data_as_json_string = self.module.jsonify(self.module.params)
if result is None:
response, info = fetch_url(self.module, self.request_url, method="POST",
headers=combined_headers,
data=data_as_json_string)
if info["status"] >= 400:
self.module.fail_json(msg=json.loads(info["body"]))
is_changed = True
result = self._clean_result(json.loads(response.read()))
else:
if self._is_object_changed(self.change_relevant_keys, self.module, result):
response, info = fetch_url(self.module, self.request_url + result['_ref'], method="PUT",
headers=combined_headers,
data=data_as_json_string)
if info['status'] >= 400:
self.module.fail_json(msg=json.loads(info["body"]))
is_changed = True
result = self._clean_result(json.loads(response.read()))
self.module.exit_json(result=result, changed=is_changed)
def _combine_headers(self):
"""
This will combine a header default with headers that come from the module declaration
:return: A combined headers dict
"""
default_headers = {"Accept": "application/json", "Content-type": "application/json"}
if self.module.params.get('headers') is not None:
result = default_headers.copy()
result.update(self.module.params.get('headers'))
else:
result = default_headers
return result
def _remove(self):
"""
removes an object from utm
"""
is_changed = False
info, result = self._lookup_entry(self.module, self.request_url)
if result is not None:
response, info = fetch_url(self.module, self.request_url + result['_ref'], method="DELETE",
headers={"Accept": "application/json", "X-Restd-Err-Ack": "all"},
data=self.module.jsonify(self.module.params))
if info["status"] >= 400:
self.module.fail_json(msg=json.loads(info["body"]))
else:
is_changed = True
self.module.exit_json(changed=is_changed)
def _lookup_entry(self, module, request_url):
"""
        Look up an existing entry.
:param module:
:param request_url:
:return:
"""
response, info = fetch_url(module, request_url, method="GET", headers={"Accept": "application/json"})
result = None
if response is not None:
results = json.loads(response.read())
result = next(iter(filter(lambda d: d['name'] == module.params.get('name'), results)), None)
return info, result
def _clean_result(self, result):
"""
        Strips irrelevant fields from the result.
:param result: The result from the query
:return: The modified result
"""
del result['utm_host']
del result['utm_port']
del result['utm_token']
del result['utm_protocol']
del result['validate_certs']
del result['url_username']
del result['url_password']
del result['state']
return result
def _is_object_changed(self, keys, module, result):
"""
Check if my object is changed
:param keys: The keys that will determine if an object is changed
:param module: The module
:param result: The result from the query
:return:
"""
for key in keys:
if module.params.get(key) != result[key]:
return True
return False
| gpl-3.0 |
shahar-stratoscale/nova | nova/tests/cells/test_cells_messaging.py | 1 | 89277 | # Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Cells Messaging module
"""
import mox
from oslo.config import cfg
from oslo import messaging as oslo_messaging
from nova.cells import messaging
from nova.cells import utils as cells_utils
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova.network import model as network_model
from nova.objects import base as objects_base
from nova.objects import fields as objects_fields
from nova.objects import instance as instance_obj
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import rpc
from nova import test
from nova.tests.cells import fakes
from nova.tests import fake_instance_actions
CONF = cfg.CONF
CONF.import_opt('name', 'nova.cells.opts', group='cells')
class CellsMessageClassesTestCase(test.TestCase):
"""Test case for the main Cells Message classes."""
def setUp(self):
super(CellsMessageClassesTestCase, self).setUp()
fakes.init(self)
self.ctxt = context.RequestContext('fake', 'fake')
self.our_name = 'api-cell'
self.msg_runner = fakes.get_message_runner(self.our_name)
self.state_manager = self.msg_runner.state_manager
def test_reverse_path(self):
path = 'a!b!c!d'
expected = 'd!c!b!a'
rev_path = messaging._reverse_path(path)
self.assertEqual(rev_path, expected)
def test_response_cell_name_from_path(self):
# test array with tuples of inputs/expected outputs
test_paths = [('cell1', 'cell1'),
('cell1!cell2', 'cell2!cell1'),
('cell1!cell2!cell3', 'cell3!cell2!cell1')]
for test_input, expected_output in test_paths:
self.assertEqual(expected_output,
messaging._response_cell_name_from_path(test_input))
def test_response_cell_name_from_path_neighbor_only(self):
# test array with tuples of inputs/expected outputs
test_paths = [('cell1', 'cell1'),
('cell1!cell2', 'cell2!cell1'),
('cell1!cell2!cell3', 'cell3!cell2')]
for test_input, expected_output in test_paths:
self.assertEqual(expected_output,
messaging._response_cell_name_from_path(test_input,
neighbor_only=True))
def test_targeted_message(self):
self.flags(max_hop_count=99, group='cells')
target_cell = 'api-cell!child-cell2!grandchild-cell1'
method = 'fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell)
self.assertEqual(self.ctxt, tgt_message.ctxt)
self.assertEqual(method, tgt_message.method_name)
self.assertEqual(method_kwargs, tgt_message.method_kwargs)
self.assertEqual(direction, tgt_message.direction)
        self.assertEqual(target_cell, tgt_message.target_cell)
self.assertFalse(tgt_message.fanout)
self.assertFalse(tgt_message.need_response)
self.assertEqual(self.our_name, tgt_message.routing_path)
self.assertEqual(1, tgt_message.hop_count)
self.assertEqual(99, tgt_message.max_hop_count)
self.assertFalse(tgt_message.is_broadcast)
# Correct next hop?
next_hop = tgt_message._get_next_hop()
child_cell = self.state_manager.get_child_cell('child-cell2')
self.assertEqual(child_cell, next_hop)
def test_create_targeted_message_with_response(self):
self.flags(max_hop_count=99, group='cells')
our_name = 'child-cell1'
target_cell = 'child-cell1!api-cell'
msg_runner = fakes.get_message_runner(our_name)
method = 'fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'up'
tgt_message = messaging._TargetedMessage(msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell,
need_response=True)
self.assertEqual(self.ctxt, tgt_message.ctxt)
self.assertEqual(method, tgt_message.method_name)
self.assertEqual(method_kwargs, tgt_message.method_kwargs)
self.assertEqual(direction, tgt_message.direction)
        self.assertEqual(target_cell, tgt_message.target_cell)
self.assertFalse(tgt_message.fanout)
self.assertTrue(tgt_message.need_response)
self.assertEqual(our_name, tgt_message.routing_path)
self.assertEqual(1, tgt_message.hop_count)
self.assertEqual(99, tgt_message.max_hop_count)
self.assertFalse(tgt_message.is_broadcast)
# Correct next hop?
next_hop = tgt_message._get_next_hop()
parent_cell = msg_runner.state_manager.get_parent_cell('api-cell')
self.assertEqual(parent_cell, next_hop)
def test_targeted_message_when_target_is_cell_state(self):
method = 'fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
target_cell = self.state_manager.get_child_cell('child-cell2')
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell)
self.assertEqual('api-cell!child-cell2', tgt_message.target_cell)
# Correct next hop?
next_hop = tgt_message._get_next_hop()
self.assertEqual(target_cell, next_hop)
def test_targeted_message_when_target_cell_state_is_me(self):
method = 'fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
target_cell = self.state_manager.get_my_state()
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell)
self.assertEqual('api-cell', tgt_message.target_cell)
# Correct next hop?
next_hop = tgt_message._get_next_hop()
self.assertEqual(target_cell, next_hop)
def test_create_broadcast_message(self):
        self.flags(name='api-cell', max_hop_count=99, group='cells')
method = 'fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
bcast_message = messaging._BroadcastMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction)
self.assertEqual(self.ctxt, bcast_message.ctxt)
self.assertEqual(method, bcast_message.method_name)
self.assertEqual(method_kwargs, bcast_message.method_kwargs)
self.assertEqual(direction, bcast_message.direction)
self.assertFalse(bcast_message.fanout)
self.assertFalse(bcast_message.need_response)
self.assertEqual(self.our_name, bcast_message.routing_path)
self.assertEqual(1, bcast_message.hop_count)
self.assertEqual(99, bcast_message.max_hop_count)
self.assertTrue(bcast_message.is_broadcast)
# Correct next hops?
next_hops = bcast_message._get_next_hops()
child_cells = self.state_manager.get_child_cells()
self.assertEqual(child_cells, next_hops)
def test_create_broadcast_message_with_response(self):
self.flags(max_hop_count=99, group='cells')
our_name = 'child-cell1'
msg_runner = fakes.get_message_runner(our_name)
method = 'fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'up'
bcast_message = messaging._BroadcastMessage(msg_runner, self.ctxt,
method, method_kwargs, direction, need_response=True)
self.assertEqual(self.ctxt, bcast_message.ctxt)
self.assertEqual(method, bcast_message.method_name)
self.assertEqual(method_kwargs, bcast_message.method_kwargs)
self.assertEqual(direction, bcast_message.direction)
self.assertFalse(bcast_message.fanout)
self.assertTrue(bcast_message.need_response)
self.assertEqual(our_name, bcast_message.routing_path)
self.assertEqual(1, bcast_message.hop_count)
self.assertEqual(99, bcast_message.max_hop_count)
self.assertTrue(bcast_message.is_broadcast)
# Correct next hops?
next_hops = bcast_message._get_next_hops()
parent_cells = msg_runner.state_manager.get_parent_cells()
self.assertEqual(parent_cells, next_hops)
def test_self_targeted_message(self):
target_cell = 'api-cell'
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
call_info = {}
def our_fake_method(message, **kwargs):
call_info['context'] = message.ctxt
call_info['routing_path'] = message.routing_path
call_info['kwargs'] = kwargs
fakes.stub_tgt_method(self, 'api-cell', 'our_fake_method',
our_fake_method)
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell)
tgt_message.process()
self.assertEqual(self.ctxt, call_info['context'])
self.assertEqual(method_kwargs, call_info['kwargs'])
self.assertEqual(target_cell, call_info['routing_path'])
def test_child_targeted_message(self):
target_cell = 'api-cell!child-cell1'
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
call_info = {}
def our_fake_method(message, **kwargs):
call_info['context'] = message.ctxt
call_info['routing_path'] = message.routing_path
call_info['kwargs'] = kwargs
fakes.stub_tgt_method(self, 'child-cell1', 'our_fake_method',
our_fake_method)
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell)
tgt_message.process()
self.assertEqual(self.ctxt, call_info['context'])
self.assertEqual(method_kwargs, call_info['kwargs'])
self.assertEqual(target_cell, call_info['routing_path'])
def test_child_targeted_message_with_object(self):
target_cell = 'api-cell!child-cell1'
method = 'our_fake_method'
direction = 'down'
call_info = {}
class CellsMsgingTestObject(objects_base.NovaObject):
"""Test object. We just need 1 field in order to test
that this gets serialized properly.
"""
fields = {'test': objects_fields.StringField()}
test_obj = CellsMsgingTestObject()
test_obj.test = 'meow'
method_kwargs = dict(obj=test_obj, arg1=1, arg2=2)
def our_fake_method(message, **kwargs):
call_info['context'] = message.ctxt
call_info['routing_path'] = message.routing_path
call_info['kwargs'] = kwargs
fakes.stub_tgt_method(self, 'child-cell1', 'our_fake_method',
our_fake_method)
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell)
tgt_message.process()
self.assertEqual(self.ctxt, call_info['context'])
self.assertEqual(target_cell, call_info['routing_path'])
self.assertEqual(3, len(call_info['kwargs']))
self.assertEqual(1, call_info['kwargs']['arg1'])
self.assertEqual(2, call_info['kwargs']['arg2'])
# Verify we get a new object with what we expect.
obj = call_info['kwargs']['obj']
self.assertIsInstance(obj, CellsMsgingTestObject)
self.assertNotEqual(id(test_obj), id(obj))
self.assertEqual(test_obj.test, obj.test)
def test_grandchild_targeted_message(self):
target_cell = 'api-cell!child-cell2!grandchild-cell1'
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
call_info = {}
def our_fake_method(message, **kwargs):
call_info['context'] = message.ctxt
call_info['routing_path'] = message.routing_path
call_info['kwargs'] = kwargs
fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method',
our_fake_method)
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell)
tgt_message.process()
self.assertEqual(self.ctxt, call_info['context'])
self.assertEqual(method_kwargs, call_info['kwargs'])
self.assertEqual(target_cell, call_info['routing_path'])
def test_grandchild_targeted_message_with_response(self):
target_cell = 'api-cell!child-cell2!grandchild-cell1'
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
call_info = {}
def our_fake_method(message, **kwargs):
call_info['context'] = message.ctxt
call_info['routing_path'] = message.routing_path
call_info['kwargs'] = kwargs
return 'our_fake_response'
fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method',
our_fake_method)
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell,
need_response=True)
response = tgt_message.process()
self.assertEqual(self.ctxt, call_info['context'])
self.assertEqual(method_kwargs, call_info['kwargs'])
self.assertEqual(target_cell, call_info['routing_path'])
self.assertFalse(response.failure)
self.assertEqual(response.value_or_raise(), 'our_fake_response')
def test_grandchild_targeted_message_with_error(self):
target_cell = 'api-cell!child-cell2!grandchild-cell1'
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
def our_fake_method(message, **kwargs):
raise test.TestingException('this should be returned')
fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method',
our_fake_method)
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell,
need_response=True)
response = tgt_message.process()
self.assertTrue(response.failure)
self.assertRaises(test.TestingException, response.value_or_raise)
def test_grandchild_targeted_message_max_hops(self):
self.flags(max_hop_count=2, group='cells')
target_cell = 'api-cell!child-cell2!grandchild-cell1'
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
def our_fake_method(message, **kwargs):
raise test.TestingException('should not be reached')
fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method',
our_fake_method)
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell,
need_response=True)
response = tgt_message.process()
self.assertTrue(response.failure)
self.assertRaises(exception.CellMaxHopCountReached,
response.value_or_raise)
def test_targeted_message_invalid_cell(self):
target_cell = 'api-cell!child-cell2!grandchild-cell4'
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell,
need_response=True)
response = tgt_message.process()
self.assertTrue(response.failure)
self.assertRaises(exception.CellRoutingInconsistency,
response.value_or_raise)
def test_targeted_message_invalid_cell2(self):
target_cell = 'unknown-cell!child-cell2'
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell,
need_response=True)
response = tgt_message.process()
self.assertTrue(response.failure)
self.assertRaises(exception.CellRoutingInconsistency,
response.value_or_raise)
def test_broadcast_routing(self):
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
cells = set()
def our_fake_method(message, **kwargs):
cells.add(message.routing_path)
fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
bcast_message = messaging._BroadcastMessage(self.msg_runner,
self.ctxt, method,
method_kwargs,
direction,
run_locally=True)
bcast_message.process()
# fakes creates 8 cells (including ourself).
self.assertEqual(len(cells), 8)
def test_broadcast_routing_up(self):
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'up'
msg_runner = fakes.get_message_runner('grandchild-cell3')
cells = set()
def our_fake_method(message, **kwargs):
cells.add(message.routing_path)
fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
bcast_message = messaging._BroadcastMessage(msg_runner, self.ctxt,
method, method_kwargs,
direction,
run_locally=True)
bcast_message.process()
# Paths are reversed, since going 'up'
expected = set(['grandchild-cell3', 'grandchild-cell3!child-cell3',
'grandchild-cell3!child-cell3!api-cell'])
self.assertEqual(expected, cells)
def test_broadcast_routing_without_ourselves(self):
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
cells = set()
def our_fake_method(message, **kwargs):
cells.add(message.routing_path)
fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
bcast_message = messaging._BroadcastMessage(self.msg_runner,
self.ctxt, method,
method_kwargs,
direction,
run_locally=False)
bcast_message.process()
# fakes creates 8 cells (including ourself). So we should see
# only 7 here.
self.assertEqual(len(cells), 7)
def test_broadcast_routing_with_response(self):
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
def our_fake_method(message, **kwargs):
return 'response-%s' % message.routing_path
fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
bcast_message = messaging._BroadcastMessage(self.msg_runner,
self.ctxt, method,
method_kwargs,
direction,
run_locally=True,
need_response=True)
responses = bcast_message.process()
self.assertEqual(len(responses), 8)
for response in responses:
self.assertFalse(response.failure)
self.assertEqual('response-%s' % response.cell_name,
response.value_or_raise())
def test_broadcast_routing_with_response_max_hops(self):
self.flags(max_hop_count=2, group='cells')
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
def our_fake_method(message, **kwargs):
return 'response-%s' % message.routing_path
fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
bcast_message = messaging._BroadcastMessage(self.msg_runner,
self.ctxt, method,
method_kwargs,
direction,
run_locally=True,
need_response=True)
responses = bcast_message.process()
# Should only get responses from our immediate children (and
# ourselves)
self.assertEqual(len(responses), 5)
for response in responses:
self.assertFalse(response.failure)
self.assertEqual('response-%s' % response.cell_name,
response.value_or_raise())
def test_broadcast_routing_with_all_erroring(self):
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
def our_fake_method(message, **kwargs):
raise test.TestingException('fake failure')
fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
bcast_message = messaging._BroadcastMessage(self.msg_runner,
self.ctxt, method,
method_kwargs,
direction,
run_locally=True,
need_response=True)
responses = bcast_message.process()
self.assertEqual(len(responses), 8)
for response in responses:
self.assertTrue(response.failure)
self.assertRaises(test.TestingException, response.value_or_raise)
def test_broadcast_routing_with_two_erroring(self):
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
def our_fake_method_failing(message, **kwargs):
raise test.TestingException('fake failure')
def our_fake_method(message, **kwargs):
return 'response-%s' % message.routing_path
fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
fakes.stub_bcast_method(self, 'child-cell2', 'our_fake_method',
our_fake_method_failing)
fakes.stub_bcast_method(self, 'grandchild-cell3', 'our_fake_method',
our_fake_method_failing)
bcast_message = messaging._BroadcastMessage(self.msg_runner,
self.ctxt, method,
method_kwargs,
direction,
run_locally=True,
need_response=True)
responses = bcast_message.process()
self.assertEqual(len(responses), 8)
failure_responses = [resp for resp in responses if resp.failure]
success_responses = [resp for resp in responses if not resp.failure]
self.assertEqual(len(failure_responses), 2)
self.assertEqual(len(success_responses), 6)
for response in success_responses:
self.assertFalse(response.failure)
self.assertEqual('response-%s' % response.cell_name,
response.value_or_raise())
for response in failure_responses:
self.assertIn(response.cell_name, ['api-cell!child-cell2',
'api-cell!child-cell3!grandchild-cell3'])
self.assertTrue(response.failure)
self.assertRaises(test.TestingException, response.value_or_raise)
class CellsTargetedMethodsTestCase(test.TestCase):
"""Test case for _TargetedMessageMethods class. Most of these
tests actually test the full path from the MessageRunner through
    to the functionality of the message method. This hits two birds with
    one stone, even though it makes these a bit more than unit tests.
"""
def setUp(self):
super(CellsTargetedMethodsTestCase, self).setUp()
fakes.init(self)
self.ctxt = context.RequestContext('fake', 'fake')
self._setup_attrs('api-cell', 'api-cell!child-cell2')
def _setup_attrs(self, source_cell, target_cell):
self.tgt_cell_name = target_cell
self.src_msg_runner = fakes.get_message_runner(source_cell)
self.src_state_manager = self.src_msg_runner.state_manager
tgt_shortname = target_cell.split('!')[-1]
self.tgt_cell_mgr = fakes.get_cells_manager(tgt_shortname)
self.tgt_msg_runner = self.tgt_cell_mgr.msg_runner
self.tgt_scheduler = self.tgt_msg_runner.scheduler
self.tgt_state_manager = self.tgt_msg_runner.state_manager
methods_cls = self.tgt_msg_runner.methods_by_type['targeted']
self.tgt_methods_cls = methods_cls
self.tgt_compute_api = methods_cls.compute_api
self.tgt_host_api = methods_cls.host_api
self.tgt_db_inst = methods_cls.db
self.tgt_c_rpcapi = methods_cls.compute_rpcapi
def test_schedule_run_instance(self):
host_sched_kwargs = {'filter_properties': {},
'key1': 'value1',
'key2': 'value2'}
self.mox.StubOutWithMock(self.tgt_scheduler, 'run_instance')
self.tgt_scheduler.run_instance(self.ctxt, host_sched_kwargs)
self.mox.ReplayAll()
self.src_msg_runner.schedule_run_instance(self.ctxt,
self.tgt_cell_name,
host_sched_kwargs)
def test_build_instances(self):
build_inst_kwargs = {'filter_properties': {},
'key1': 'value1',
'key2': 'value2'}
self.mox.StubOutWithMock(self.tgt_scheduler, 'build_instances')
self.tgt_scheduler.build_instances(self.ctxt, build_inst_kwargs)
self.mox.ReplayAll()
self.src_msg_runner.build_instances(self.ctxt, self.tgt_cell_name,
build_inst_kwargs)
def test_run_compute_api_method(self):
instance_uuid = 'fake_instance_uuid'
method_info = {'method': 'backup',
'method_args': (instance_uuid, 2, 3),
'method_kwargs': {'arg1': 'val1', 'arg2': 'val2'}}
self.mox.StubOutWithMock(self.tgt_compute_api, 'backup')
self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_get_by_uuid')
self.tgt_db_inst.instance_get_by_uuid(self.ctxt,
instance_uuid).AndReturn('fake_instance')
self.tgt_compute_api.backup(self.ctxt, 'fake_instance', 2, 3,
arg1='val1', arg2='val2').AndReturn('fake_result')
self.mox.ReplayAll()
response = self.src_msg_runner.run_compute_api_method(
self.ctxt,
self.tgt_cell_name,
method_info,
True)
result = response.value_or_raise()
self.assertEqual('fake_result', result)
def test_run_compute_api_method_expects_obj(self):
instance_uuid = 'fake_instance_uuid'
method_info = {'method': 'start',
'method_args': (instance_uuid, 2, 3),
'method_kwargs': {'arg1': 'val1', 'arg2': 'val2'}}
self.mox.StubOutWithMock(self.tgt_compute_api, 'start')
self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_get_by_uuid')
self.tgt_db_inst.instance_get_by_uuid(self.ctxt,
instance_uuid).AndReturn('fake_instance')
def get_instance_mock():
# NOTE(comstud): This block of code simulates the following
# mox code:
#
# self.mox.StubOutWithMock(instance_obj, 'Instance',
# use_mock_anything=True)
# self.mox.StubOutWithMock(instance_obj.Instance,
# '_from_db_object')
# instance_mock = self.mox.CreateMock(instance_obj.Instance)
# instance_obj.Instance().AndReturn(instance_mock)
#
            # Unfortunately, the above code fails on py27 due to an
            # issue with the Mock object, similar to this one:
# https://code.google.com/p/pymox/issues/detail?id=35
#
class FakeInstance(object):
@classmethod
def _from_db_object(cls, ctxt, obj, db_obj):
pass
instance_mock = FakeInstance()
def fake_instance():
return instance_mock
self.stubs.Set(instance_obj, 'Instance', fake_instance)
self.mox.StubOutWithMock(instance_mock, '_from_db_object')
return instance_mock
instance = get_instance_mock()
instance._from_db_object(
self.ctxt, instance, 'fake_instance').AndReturn(instance)
self.tgt_compute_api.start(self.ctxt, instance, 2, 3,
arg1='val1', arg2='val2').AndReturn('fake_result')
self.mox.ReplayAll()
response = self.src_msg_runner.run_compute_api_method(
self.ctxt,
self.tgt_cell_name,
method_info,
True)
result = response.value_or_raise()
self.assertEqual('fake_result', result)
def test_run_compute_api_method_unknown_instance(self):
# Unknown instance should send a broadcast up that instance
# is gone.
instance_uuid = 'fake_instance_uuid'
instance = {'uuid': instance_uuid}
method_info = {'method': 'reboot',
'method_args': (instance_uuid, 2, 3),
'method_kwargs': {'arg1': 'val1', 'arg2': 'val2'}}
self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_get_by_uuid')
self.mox.StubOutWithMock(self.tgt_msg_runner,
'instance_destroy_at_top')
self.tgt_db_inst.instance_get_by_uuid(self.ctxt,
'fake_instance_uuid').AndRaise(
exception.InstanceNotFound(instance_id=instance_uuid))
self.tgt_msg_runner.instance_destroy_at_top(self.ctxt, instance)
self.mox.ReplayAll()
response = self.src_msg_runner.run_compute_api_method(
self.ctxt,
self.tgt_cell_name,
method_info,
True)
self.assertRaises(exception.InstanceNotFound,
response.value_or_raise)
def test_update_capabilities(self):
# Route up to API
self._setup_attrs('child-cell2', 'child-cell2!api-cell')
capabs = {'cap1': set(['val1', 'val2']),
'cap2': set(['val3'])}
# The list(set([])) seems silly, but we can't assume the order
# of the list... This behavior should match the code we're
        # testing... which is to check that a set was converted to a list.
expected_capabs = {'cap1': list(set(['val1', 'val2'])),
'cap2': ['val3']}
self.mox.StubOutWithMock(self.src_state_manager,
'get_our_capabilities')
self.mox.StubOutWithMock(self.tgt_state_manager,
'update_cell_capabilities')
self.mox.StubOutWithMock(self.tgt_msg_runner,
'tell_parents_our_capabilities')
self.src_state_manager.get_our_capabilities().AndReturn(capabs)
self.tgt_state_manager.update_cell_capabilities('child-cell2',
expected_capabs)
self.tgt_msg_runner.tell_parents_our_capabilities(self.ctxt)
self.mox.ReplayAll()
self.src_msg_runner.tell_parents_our_capabilities(self.ctxt)
def test_update_capacities(self):
self._setup_attrs('child-cell2', 'child-cell2!api-cell')
capacs = 'fake_capacs'
self.mox.StubOutWithMock(self.src_state_manager,
'get_our_capacities')
self.mox.StubOutWithMock(self.tgt_state_manager,
'update_cell_capacities')
self.mox.StubOutWithMock(self.tgt_msg_runner,
'tell_parents_our_capacities')
self.src_state_manager.get_our_capacities().AndReturn(capacs)
self.tgt_state_manager.update_cell_capacities('child-cell2',
capacs)
self.tgt_msg_runner.tell_parents_our_capacities(self.ctxt)
self.mox.ReplayAll()
self.src_msg_runner.tell_parents_our_capacities(self.ctxt)
def test_announce_capabilities(self):
self._setup_attrs('api-cell', 'api-cell!child-cell1')
# To make this easier to test, make us only have 1 child cell.
cell_state = self.src_state_manager.child_cells['child-cell1']
self.src_state_manager.child_cells = {'child-cell1': cell_state}
self.mox.StubOutWithMock(self.tgt_msg_runner,
'tell_parents_our_capabilities')
self.tgt_msg_runner.tell_parents_our_capabilities(self.ctxt)
self.mox.ReplayAll()
self.src_msg_runner.ask_children_for_capabilities(self.ctxt)
def test_announce_capacities(self):
self._setup_attrs('api-cell', 'api-cell!child-cell1')
# To make this easier to test, make us only have 1 child cell.
cell_state = self.src_state_manager.child_cells['child-cell1']
self.src_state_manager.child_cells = {'child-cell1': cell_state}
self.mox.StubOutWithMock(self.tgt_msg_runner,
'tell_parents_our_capacities')
self.tgt_msg_runner.tell_parents_our_capacities(self.ctxt)
self.mox.ReplayAll()
self.src_msg_runner.ask_children_for_capacities(self.ctxt)
def test_service_get_by_compute_host(self):
fake_host_name = 'fake-host-name'
self.mox.StubOutWithMock(self.tgt_db_inst,
'service_get_by_compute_host')
self.tgt_db_inst.service_get_by_compute_host(self.ctxt,
fake_host_name).AndReturn('fake-service')
self.mox.ReplayAll()
response = self.src_msg_runner.service_get_by_compute_host(
self.ctxt,
self.tgt_cell_name,
fake_host_name)
result = response.value_or_raise()
self.assertEqual('fake-service', result)
def test_service_update(self):
binary = 'nova-compute'
fake_service = dict(id=42, host='fake_host', binary='nova-compute',
topic='compute')
fake_compute = dict(
id=7116, service_id=42, host='fake_host', vcpus=0, memory_mb=0,
local_gb=0, vcpus_used=0, memory_mb_used=0, local_gb_used=0,
hypervisor_type=0, hypervisor_version=0, hypervisor_hostname=0,
free_ram_mb=0, free_disk_gb=0, current_workload=0, running_vms=0,
cpu_info='HAL', disk_available_least=0)
params_to_update = {'disabled': True, 'report_count': 13}
ctxt = context.RequestContext('fake_user', 'fake_project',
is_admin=True)
# We use the real DB for this test, as it's too hard to reach the
# host_api to mock out its DB methods
db.service_create(ctxt, fake_service)
db.compute_node_create(ctxt, fake_compute)
self.mox.ReplayAll()
response = self.src_msg_runner.service_update(
ctxt, self.tgt_cell_name,
'fake_host', binary, params_to_update)
result = response.value_or_raise()
result.pop('created_at', None)
result.pop('updated_at', None)
result.pop('disabled_reason', None)
expected_result = dict(
deleted=0, deleted_at=None,
binary=fake_service['binary'],
disabled=True, # We just updated this..
report_count=13, # ..and this
host='fake_host', id=42,
topic='compute')
self.assertEqual(expected_result, result)
def test_service_delete(self):
fake_service = dict(id=42, host='fake_host', binary='nova-compute',
topic='compute')
ctxt = self.ctxt.elevated()
db.service_create(ctxt, fake_service)
self.src_msg_runner.service_delete(
ctxt, self.tgt_cell_name, fake_service['id'])
self.assertRaises(exception.ServiceNotFound,
db.service_get, ctxt, fake_service['id'])
def test_proxy_rpc_to_manager_call(self):
fake_topic = 'fake-topic'
fake_rpc_message = {'method': 'fake_rpc_method', 'args': {}}
fake_host_name = 'fake-host-name'
self.mox.StubOutWithMock(self.tgt_db_inst,
'service_get_by_compute_host')
self.tgt_db_inst.service_get_by_compute_host(self.ctxt,
fake_host_name)
target = oslo_messaging.Target(topic='fake-topic')
rpcclient = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(rpc, 'get_client')
rpc.get_client(target).AndReturn(rpcclient)
rpcclient.prepare(timeout=5).AndReturn(rpcclient)
rpcclient.call(mox.IgnoreArg(),
'fake_rpc_method').AndReturn('fake_result')
self.mox.ReplayAll()
response = self.src_msg_runner.proxy_rpc_to_manager(
self.ctxt,
self.tgt_cell_name,
fake_host_name,
fake_topic,
fake_rpc_message, True, timeout=5)
result = response.value_or_raise()
self.assertEqual('fake_result', result)
def test_proxy_rpc_to_manager_cast(self):
fake_topic = 'fake-topic'
fake_rpc_message = {'method': 'fake_rpc_method', 'args': {}}
fake_host_name = 'fake-host-name'
self.mox.StubOutWithMock(self.tgt_db_inst,
'service_get_by_compute_host')
self.tgt_db_inst.service_get_by_compute_host(self.ctxt,
fake_host_name)
target = oslo_messaging.Target(topic='fake-topic')
rpcclient = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(rpc, 'get_client')
rpc.get_client(target).AndReturn(rpcclient)
rpcclient.cast(mox.IgnoreArg(), 'fake_rpc_method')
self.mox.ReplayAll()
self.src_msg_runner.proxy_rpc_to_manager(
self.ctxt,
self.tgt_cell_name,
fake_host_name,
fake_topic,
fake_rpc_message, False, timeout=None)
def test_task_log_get_all_targetted(self):
task_name = 'fake_task_name'
begin = 'fake_begin'
end = 'fake_end'
host = 'fake_host'
state = 'fake_state'
self.mox.StubOutWithMock(self.tgt_db_inst, 'task_log_get_all')
self.tgt_db_inst.task_log_get_all(self.ctxt, task_name,
begin, end, host=host,
state=state).AndReturn(['fake_result'])
self.mox.ReplayAll()
response = self.src_msg_runner.task_log_get_all(self.ctxt,
self.tgt_cell_name, task_name, begin, end, host=host,
state=state)
self.assertIsInstance(response, list)
self.assertEqual(1, len(response))
result = response[0].value_or_raise()
self.assertEqual(['fake_result'], result)
def test_compute_node_get(self):
compute_id = 'fake-id'
self.mox.StubOutWithMock(self.tgt_db_inst, 'compute_node_get')
self.tgt_db_inst.compute_node_get(self.ctxt,
compute_id).AndReturn('fake_result')
self.mox.ReplayAll()
response = self.src_msg_runner.compute_node_get(self.ctxt,
self.tgt_cell_name, compute_id)
result = response.value_or_raise()
self.assertEqual('fake_result', result)
def test_actions_get(self):
fake_uuid = fake_instance_actions.FAKE_UUID
fake_req_id = fake_instance_actions.FAKE_REQUEST_ID1
fake_act = fake_instance_actions.FAKE_ACTIONS[fake_uuid][fake_req_id]
self.mox.StubOutWithMock(self.tgt_db_inst, 'actions_get')
self.tgt_db_inst.actions_get(self.ctxt,
'fake-uuid').AndReturn([fake_act])
self.mox.ReplayAll()
response = self.src_msg_runner.actions_get(self.ctxt,
self.tgt_cell_name,
'fake-uuid')
result = response.value_or_raise()
self.assertEqual([jsonutils.to_primitive(fake_act)], result)
def test_action_get_by_request_id(self):
fake_uuid = fake_instance_actions.FAKE_UUID
fake_req_id = fake_instance_actions.FAKE_REQUEST_ID1
fake_act = fake_instance_actions.FAKE_ACTIONS[fake_uuid][fake_req_id]
self.mox.StubOutWithMock(self.tgt_db_inst, 'action_get_by_request_id')
self.tgt_db_inst.action_get_by_request_id(self.ctxt,
'fake-uuid', 'req-fake').AndReturn(fake_act)
self.mox.ReplayAll()
response = self.src_msg_runner.action_get_by_request_id(self.ctxt,
self.tgt_cell_name, 'fake-uuid', 'req-fake')
result = response.value_or_raise()
self.assertEqual(jsonutils.to_primitive(fake_act), result)
def test_action_events_get(self):
fake_action_id = fake_instance_actions.FAKE_ACTION_ID1
fake_events = fake_instance_actions.FAKE_EVENTS[fake_action_id]
self.mox.StubOutWithMock(self.tgt_db_inst, 'action_events_get')
self.tgt_db_inst.action_events_get(self.ctxt,
'fake-action').AndReturn(fake_events)
self.mox.ReplayAll()
response = self.src_msg_runner.action_events_get(self.ctxt,
self.tgt_cell_name,
'fake-action')
result = response.value_or_raise()
self.assertEqual(jsonutils.to_primitive(fake_events), result)
def test_validate_console_port(self):
instance_uuid = 'fake_instance_uuid'
instance = {'uuid': instance_uuid}
console_port = 'fake-port'
console_type = 'fake-type'
self.mox.StubOutWithMock(self.tgt_c_rpcapi, 'validate_console_port')
self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_get_by_uuid')
self.tgt_db_inst.instance_get_by_uuid(self.ctxt,
instance_uuid).AndReturn(instance)
self.tgt_c_rpcapi.validate_console_port(self.ctxt,
instance, console_port, console_type).AndReturn('fake_result')
self.mox.ReplayAll()
response = self.src_msg_runner.validate_console_port(self.ctxt,
self.tgt_cell_name, instance_uuid, console_port,
console_type)
result = response.value_or_raise()
self.assertEqual('fake_result', result)
def test_get_migrations_for_a_given_cell(self):
filters = {'cell_name': 'child-cell2', 'status': 'confirmed'}
migrations_in_progress = [{'id': 123}]
self.mox.StubOutWithMock(self.tgt_compute_api,
'get_migrations')
self.tgt_compute_api.get_migrations(self.ctxt, filters).\
AndReturn(migrations_in_progress)
self.mox.ReplayAll()
responses = self.src_msg_runner.get_migrations(
self.ctxt,
self.tgt_cell_name, False, filters)
result = responses[0].value_or_raise()
self.assertEqual(migrations_in_progress, result)
def test_get_migrations_for_an_invalid_cell(self):
filters = {'cell_name': 'invalid_Cell', 'status': 'confirmed'}
responses = self.src_msg_runner.get_migrations(
self.ctxt,
'api_cell!invalid_cell', False, filters)
self.assertEqual(0, len(responses))
def test_call_compute_api_with_obj(self):
instance = instance_obj.Instance()
instance.uuid = uuidutils.generate_uuid()
self.mox.StubOutWithMock(instance, 'refresh')
# Using 'snapshot' for this test, because it
# takes args and kwargs.
self.mox.StubOutWithMock(self.tgt_compute_api, 'snapshot')
instance.refresh(self.ctxt)
self.tgt_compute_api.snapshot(
self.ctxt, instance, 'name',
extra_properties='props').AndReturn('foo')
self.mox.ReplayAll()
result = self.tgt_methods_cls._call_compute_api_with_obj(
self.ctxt, instance, 'snapshot', 'name',
extra_properties='props')
self.assertEqual('foo', result)
def test_call_compute_with_obj_unknown_instance(self):
instance = instance_obj.Instance()
instance.uuid = uuidutils.generate_uuid()
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
self.mox.StubOutWithMock(instance, 'refresh')
self.mox.StubOutWithMock(self.tgt_msg_runner,
'instance_destroy_at_top')
instance.refresh(self.ctxt).AndRaise(
exception.InstanceNotFound(instance_id=instance.uuid))
self.tgt_msg_runner.instance_destroy_at_top(self.ctxt,
{'uuid': instance.uuid})
self.mox.ReplayAll()
self.assertRaises(exception.InstanceNotFound,
self.tgt_methods_cls._call_compute_api_with_obj,
self.ctxt, instance, 'snapshot', 'name')
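    # Summary of the behaviour pinned above (added note): when refresh()
    # raises InstanceNotFound in this cell, the method both propagates the
    # exception and calls instance_destroy_at_top() so the API cell drops
    # its now-stale copy of the instance.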
def _instance_update_helper(self, admin_state_reset):
class FakeMessage(object):
pass
message = FakeMessage()
message.ctxt = self.ctxt
instance = instance_obj.Instance()
instance.cell_name = self.tgt_cell_name
instance.obj_reset_changes()
instance.task_state = 'meow'
instance.vm_state = 'wuff'
instance.user_data = 'foo'
instance.metadata = {'meta': 'data'}
instance.system_metadata = {'system': 'metadata'}
self.assertEqual(set(['user_data', 'vm_state', 'task_state',
'metadata', 'system_metadata']),
instance.obj_what_changed())
self.mox.StubOutWithMock(instance, 'save')
def _check_object(*args, **kwargs):
# task_state and vm_state changes should have been cleared
# before calling save()
if admin_state_reset:
self.assertEqual(
set(['user_data', 'vm_state', 'task_state']),
instance.obj_what_changed())
else:
self.assertEqual(set(['user_data']),
instance.obj_what_changed())
instance.save(self.ctxt, expected_task_state='exp_task',
expected_vm_state='exp_vm').WithSideEffects(
_check_object)
self.mox.ReplayAll()
self.tgt_methods_cls.instance_update_from_api(
message,
instance,
expected_vm_state='exp_vm',
expected_task_state='exp_task',
admin_state_reset=admin_state_reset)
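    # Added for illustration: a hedged sketch of the state-reset rule that
    # _instance_update_helper() verifies. Field names mirror the helper;
    # this method is not used by any test.
    def _example_clear_admin_state(self, instance, admin_state_reset):
        if not admin_state_reset:
            # Without an explicit admin reset, locally-set vm_state and
            # task_state must not be pushed down, so they are cleared from
            # the object's changed-fields set before save().
            instance.obj_reset_changes(['vm_state', 'task_state'])
        return instance.obj_what_changed()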
def test_instance_update_from_api(self):
self._instance_update_helper(False)
def test_instance_update_from_api_admin_state_reset(self):
self._instance_update_helper(True)
def _test_instance_action_method(self, method, args, kwargs,
expected_args, expected_kwargs,
expect_result):
class FakeMessage(object):
pass
message = FakeMessage()
message.ctxt = self.ctxt
message.need_response = expect_result
meth_cls = self.tgt_methods_cls
self.mox.StubOutWithMock(meth_cls, '_call_compute_api_with_obj')
method_corrections = {
'terminate': 'delete',
}
api_method = method_corrections.get(method, method)
meth_cls._call_compute_api_with_obj(
self.ctxt, 'fake-instance', api_method,
*expected_args, **expected_kwargs).AndReturn('meow')
self.mox.ReplayAll()
method_translations = {'revert_resize': 'revert_resize',
'confirm_resize': 'confirm_resize',
'reset_network': 'reset_network',
'inject_network_info': 'inject_network_info',
}
tgt_method = method_translations.get(method,
'%s_instance' % method)
result = getattr(meth_cls, tgt_method)(
message, 'fake-instance', *args, **kwargs)
if expect_result:
self.assertEqual('meow', result)
def test_start_instance(self):
self._test_instance_action_method('start', (), {}, (), {}, False)
def test_stop_instance_cast(self):
self._test_instance_action_method('stop', (), {}, (),
{'do_cast': True}, False)
def test_stop_instance_call(self):
self._test_instance_action_method('stop', (), {}, (),
{'do_cast': False}, True)
def test_reboot_instance(self):
kwargs = dict(reboot_type='HARD')
self._test_instance_action_method('reboot', (), kwargs, (),
kwargs, False)
def test_suspend_instance(self):
self._test_instance_action_method('suspend', (), {}, (), {}, False)
def test_resume_instance(self):
self._test_instance_action_method('resume', (), {}, (), {}, False)
def test_get_host_uptime(self):
host_name = "fake-host"
host_uptime = (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
" 0.20, 0.12, 0.14")
self.mox.StubOutWithMock(self.tgt_host_api, 'get_host_uptime')
self.tgt_host_api.get_host_uptime(self.ctxt, host_name).\
AndReturn(host_uptime)
self.mox.ReplayAll()
response = self.src_msg_runner.get_host_uptime(self.ctxt,
self.tgt_cell_name,
host_name)
expected_host_uptime = response.value_or_raise()
self.assertEqual(host_uptime, expected_host_uptime)
def test_terminate_instance(self):
self._test_instance_action_method('terminate',
(), {}, (), {}, False)
def test_soft_delete_instance(self):
self._test_instance_action_method('soft_delete',
(), {}, (), {}, False)
def test_pause_instance(self):
self._test_instance_action_method('pause', (), {}, (), {}, False)
def test_unpause_instance(self):
self._test_instance_action_method('unpause', (), {}, (), {}, False)
def test_resize_instance(self):
kwargs = dict(flavor=dict(id=42, flavorid='orangemocchafrappuccino'),
extra_instance_updates=dict(cow='moo'))
expected_kwargs = dict(flavor_id='orangemocchafrappuccino', cow='moo')
self._test_instance_action_method('resize', (), kwargs,
(), expected_kwargs,
False)
def test_live_migrate_instance(self):
kwargs = dict(block_migration='fake-block-mig',
disk_over_commit='fake-commit',
host_name='fake-host',
pclm='fake-pclm')
        expected_args = ('fake-block-mig', 'fake-commit', 'fake-host',
                         'fake-pclm')
self._test_instance_action_method('live_migrate', (), kwargs,
expected_args, {}, False)
def test_revert_resize(self):
self._test_instance_action_method('revert_resize',
(), {}, (), {}, False)
def test_confirm_resize(self):
self._test_instance_action_method('confirm_resize',
(), {}, (), {}, False)
def test_reset_network(self):
self._test_instance_action_method('reset_network',
(), {}, (), {}, False)
def test_inject_network_info(self):
self._test_instance_action_method('inject_network_info',
(), {}, (), {}, False)
def test_snapshot_instance(self):
inst = instance_obj.Instance()
meth_cls = self.tgt_methods_cls
self.mox.StubOutWithMock(inst, 'refresh')
self.mox.StubOutWithMock(inst, 'save')
self.mox.StubOutWithMock(meth_cls.compute_rpcapi, 'snapshot_instance')
def check_state(expected_task_state=None):
self.assertEqual(task_states.IMAGE_SNAPSHOT_PENDING,
inst.task_state)
inst.refresh()
inst.save(expected_task_state=[None]).WithSideEffects(check_state)
meth_cls.compute_rpcapi.snapshot_instance(self.ctxt,
inst, 'image-id')
self.mox.ReplayAll()
class FakeMessage(object):
pass
message = FakeMessage()
message.ctxt = self.ctxt
message.need_response = False
meth_cls.snapshot_instance(message, inst, image_id='image-id')
def test_backup_instance(self):
inst = instance_obj.Instance()
meth_cls = self.tgt_methods_cls
self.mox.StubOutWithMock(inst, 'refresh')
self.mox.StubOutWithMock(inst, 'save')
self.mox.StubOutWithMock(meth_cls.compute_rpcapi, 'backup_instance')
def check_state(expected_task_state=None):
self.assertEqual(task_states.IMAGE_BACKUP, inst.task_state)
inst.refresh()
inst.save(expected_task_state=[None]).WithSideEffects(check_state)
meth_cls.compute_rpcapi.backup_instance(self.ctxt,
inst,
'image-id',
'backup-type',
'rotation')
self.mox.ReplayAll()
class FakeMessage(object):
pass
message = FakeMessage()
message.ctxt = self.ctxt
message.need_response = False
meth_cls.backup_instance(message, inst,
image_id='image-id',
backup_type='backup-type',
rotation='rotation')
class CellsBroadcastMethodsTestCase(test.TestCase):
"""Test case for _BroadcastMessageMethods class. Most of these
tests actually test the full path from the MessageRunner through
to the functionality of the message method. Hits 2 birds with 1
stone, even though it's a little more than a unit test.
"""
def setUp(self):
super(CellsBroadcastMethodsTestCase, self).setUp()
fakes.init(self)
self.ctxt = context.RequestContext('fake', 'fake')
self._setup_attrs()
def _setup_attrs(self, up=True):
mid_cell = 'child-cell2'
if up:
src_cell = 'grandchild-cell1'
tgt_cell = 'api-cell'
else:
src_cell = 'api-cell'
tgt_cell = 'grandchild-cell1'
self.src_msg_runner = fakes.get_message_runner(src_cell)
methods_cls = self.src_msg_runner.methods_by_type['broadcast']
self.src_methods_cls = methods_cls
self.src_db_inst = methods_cls.db
self.src_compute_api = methods_cls.compute_api
self.src_ca_rpcapi = methods_cls.consoleauth_rpcapi
if not up:
# fudge things so we only have 1 child to broadcast to
state_manager = self.src_msg_runner.state_manager
for cell in state_manager.get_child_cells():
if cell.name != 'child-cell2':
del state_manager.child_cells[cell.name]
self.mid_msg_runner = fakes.get_message_runner(mid_cell)
methods_cls = self.mid_msg_runner.methods_by_type['broadcast']
self.mid_methods_cls = methods_cls
self.mid_db_inst = methods_cls.db
self.mid_compute_api = methods_cls.compute_api
self.mid_ca_rpcapi = methods_cls.consoleauth_rpcapi
self.tgt_msg_runner = fakes.get_message_runner(tgt_cell)
methods_cls = self.tgt_msg_runner.methods_by_type['broadcast']
self.tgt_methods_cls = methods_cls
self.tgt_db_inst = methods_cls.db
self.tgt_compute_api = methods_cls.compute_api
self.tgt_ca_rpcapi = methods_cls.consoleauth_rpcapi
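    # Orientation note (added): the fake topology wired up in _setup_attrs()
    # is a three-level chain:
    #
    #     api-cell  ->  child-cell2 (mid)  ->  grandchild-cell1
    #
    # With up=True, messages travel from the grandchild up to the API cell;
    # with up=False they broadcast downwards, trimmed to the single
    # child-cell2 branch so exactly one path is exercised.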
def test_at_the_top(self):
self.assertTrue(self.tgt_methods_cls._at_the_top())
self.assertFalse(self.mid_methods_cls._at_the_top())
self.assertFalse(self.src_methods_cls._at_the_top())
def test_apply_expected_states_building(self):
instance_info = {'vm_state': vm_states.BUILDING}
expected = dict(instance_info,
expected_vm_state=[vm_states.BUILDING, None])
self.src_methods_cls._apply_expected_states(instance_info)
self.assertEqual(expected, instance_info)
def test_apply_expected_states_resize_finish(self):
instance_info = {'task_state': task_states.RESIZE_FINISH}
exp_states = [task_states.RESIZE_FINISH,
task_states.RESIZE_MIGRATED,
task_states.RESIZE_MIGRATING,
task_states.RESIZE_PREP]
expected = dict(instance_info, expected_task_state=exp_states)
self.src_methods_cls._apply_expected_states(instance_info)
self.assertEqual(expected, instance_info)
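    # Added note on _apply_expected_states, as pinned by the two tests above:
    # a BUILDING vm_state gains expected_vm_state=[BUILDING, None], and a
    # RESIZE_FINISH task_state gains the full list of in-flight resize task
    # states, so the update only lands if the target cell's record is still
    # in one of the states the source believed it was in.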
def _test_instance_update_at_top(self, net_info, exists=True):
fake_info_cache = {'id': 1,
'instance': 'fake_instance',
'network_info': net_info}
fake_sys_metadata = [{'id': 1,
'key': 'key1',
'value': 'value1'},
{'id': 2,
'key': 'key2',
'value': 'value2'}]
fake_instance = {'id': 2,
'uuid': 'fake_uuid',
'security_groups': 'fake',
'volumes': 'fake',
'cell_name': 'fake',
'name': 'fake',
'metadata': 'fake',
'info_cache': fake_info_cache,
'system_metadata': fake_sys_metadata,
'other': 'meow'}
expected_sys_metadata = {'key1': 'value1',
'key2': 'value2'}
expected_info_cache = {'network_info': "[]"}
expected_cell_name = 'api-cell!child-cell2!grandchild-cell1'
expected_instance = {'system_metadata': expected_sys_metadata,
'cell_name': expected_cell_name,
'other': 'meow',
'uuid': 'fake_uuid'}
# To show these should not be called in src/mid-level cell
self.mox.StubOutWithMock(self.src_db_inst, 'instance_update')
self.mox.StubOutWithMock(self.src_db_inst,
'instance_info_cache_update')
self.mox.StubOutWithMock(self.mid_db_inst, 'instance_update')
self.mox.StubOutWithMock(self.mid_db_inst,
'instance_info_cache_update')
self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_update')
self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_create')
self.mox.StubOutWithMock(self.tgt_db_inst,
'instance_info_cache_update')
mock = self.tgt_db_inst.instance_update(self.ctxt, 'fake_uuid',
expected_instance,
update_cells=False)
if not exists:
mock.AndRaise(exception.InstanceNotFound(instance_id='fake_uuid'))
self.tgt_db_inst.instance_create(self.ctxt,
expected_instance)
self.tgt_db_inst.instance_info_cache_update(self.ctxt, 'fake_uuid',
expected_info_cache)
self.mox.ReplayAll()
self.src_msg_runner.instance_update_at_top(self.ctxt, fake_instance)
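    # What the helper above asserts, summarised (added note): before an
    # instance is synced to the top cell, relationship-ish fields (id,
    # security_groups, volumes, name, metadata, info_cache) are stripped,
    # system_metadata is flattened from [{'key': k, 'value': v}, ...] into
    # {k: v}, cell_name is expanded to the full routing path, and only the
    # target (top) cell's DB is touched; info_cache is written separately
    # via instance_info_cache_update().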
def test_instance_update_at_top(self):
self._test_instance_update_at_top("[]")
def test_instance_update_at_top_netinfo_list(self):
self._test_instance_update_at_top([])
def test_instance_update_at_top_netinfo_model(self):
self._test_instance_update_at_top(network_model.NetworkInfo())
def test_instance_update_at_top_doesnt_already_exist(self):
self._test_instance_update_at_top([], exists=False)
def test_instance_update_at_top_with_building_state(self):
fake_info_cache = {'id': 1,
'instance': 'fake_instance',
'other': 'moo'}
fake_sys_metadata = [{'id': 1,
'key': 'key1',
'value': 'value1'},
{'id': 2,
'key': 'key2',
'value': 'value2'}]
fake_instance = {'id': 2,
'uuid': 'fake_uuid',
'security_groups': 'fake',
'volumes': 'fake',
'cell_name': 'fake',
'name': 'fake',
'metadata': 'fake',
'info_cache': fake_info_cache,
'system_metadata': fake_sys_metadata,
'vm_state': vm_states.BUILDING,
'other': 'meow'}
expected_sys_metadata = {'key1': 'value1',
'key2': 'value2'}
expected_info_cache = {'other': 'moo'}
expected_cell_name = 'api-cell!child-cell2!grandchild-cell1'
expected_instance = {'system_metadata': expected_sys_metadata,
'cell_name': expected_cell_name,
'other': 'meow',
'vm_state': vm_states.BUILDING,
'expected_vm_state': [vm_states.BUILDING, None],
'uuid': 'fake_uuid'}
# To show these should not be called in src/mid-level cell
self.mox.StubOutWithMock(self.src_db_inst, 'instance_update')
self.mox.StubOutWithMock(self.src_db_inst,
'instance_info_cache_update')
self.mox.StubOutWithMock(self.mid_db_inst, 'instance_update')
self.mox.StubOutWithMock(self.mid_db_inst,
'instance_info_cache_update')
self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_update')
self.mox.StubOutWithMock(self.tgt_db_inst,
'instance_info_cache_update')
self.tgt_db_inst.instance_update(self.ctxt, 'fake_uuid',
expected_instance,
update_cells=False)
self.tgt_db_inst.instance_info_cache_update(self.ctxt, 'fake_uuid',
expected_info_cache)
self.mox.ReplayAll()
self.src_msg_runner.instance_update_at_top(self.ctxt, fake_instance)
def test_instance_destroy_at_top(self):
fake_instance = {'uuid': 'fake_uuid'}
# To show these should not be called in src/mid-level cell
self.mox.StubOutWithMock(self.src_db_inst, 'instance_destroy')
self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_destroy')
self.tgt_db_inst.instance_destroy(self.ctxt, 'fake_uuid',
update_cells=False)
self.mox.ReplayAll()
self.src_msg_runner.instance_destroy_at_top(self.ctxt, fake_instance)
def test_instance_hard_delete_everywhere(self):
# Reset this, as this is a broadcast down.
self._setup_attrs(up=False)
instance = {'uuid': 'meow'}
# Should not be called in src (API cell)
self.mox.StubOutWithMock(self.src_compute_api, 'delete')
self.mox.StubOutWithMock(self.mid_compute_api, 'delete')
self.mox.StubOutWithMock(self.tgt_compute_api, 'delete')
self.mid_compute_api.delete(self.ctxt, instance)
self.tgt_compute_api.delete(self.ctxt, instance)
self.mox.ReplayAll()
self.src_msg_runner.instance_delete_everywhere(self.ctxt,
instance, 'hard')
def test_instance_soft_delete_everywhere(self):
# Reset this, as this is a broadcast down.
self._setup_attrs(up=False)
instance = {'uuid': 'meow'}
# Should not be called in src (API cell)
self.mox.StubOutWithMock(self.src_compute_api, 'soft_delete')
self.mox.StubOutWithMock(self.mid_compute_api, 'soft_delete')
self.mox.StubOutWithMock(self.tgt_compute_api, 'soft_delete')
self.mid_compute_api.soft_delete(self.ctxt, instance)
self.tgt_compute_api.soft_delete(self.ctxt, instance)
self.mox.ReplayAll()
self.src_msg_runner.instance_delete_everywhere(self.ctxt,
instance, 'soft')
def test_instance_fault_create_at_top(self):
fake_instance_fault = {'id': 1,
'other stuff': 2,
'more stuff': 3}
expected_instance_fault = {'other stuff': 2,
'more stuff': 3}
# Shouldn't be called for these 2 cells
self.mox.StubOutWithMock(self.src_db_inst, 'instance_fault_create')
self.mox.StubOutWithMock(self.mid_db_inst, 'instance_fault_create')
self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_fault_create')
self.tgt_db_inst.instance_fault_create(self.ctxt,
expected_instance_fault)
self.mox.ReplayAll()
self.src_msg_runner.instance_fault_create_at_top(self.ctxt,
fake_instance_fault)
def test_bw_usage_update_at_top(self):
fake_bw_update_info = {'uuid': 'fake_uuid',
'mac': 'fake_mac',
'start_period': 'fake_start_period',
'bw_in': 'fake_bw_in',
'bw_out': 'fake_bw_out',
'last_ctr_in': 'fake_last_ctr_in',
'last_ctr_out': 'fake_last_ctr_out',
'last_refreshed': 'fake_last_refreshed'}
# Shouldn't be called for these 2 cells
self.mox.StubOutWithMock(self.src_db_inst, 'bw_usage_update')
self.mox.StubOutWithMock(self.mid_db_inst, 'bw_usage_update')
self.mox.StubOutWithMock(self.tgt_db_inst, 'bw_usage_update')
self.tgt_db_inst.bw_usage_update(self.ctxt, **fake_bw_update_info)
self.mox.ReplayAll()
self.src_msg_runner.bw_usage_update_at_top(self.ctxt,
fake_bw_update_info)
def test_sync_instances(self):
# Reset this, as this is a broadcast down.
self._setup_attrs(up=False)
project_id = 'fake_project_id'
updated_since_raw = 'fake_updated_since_raw'
updated_since_parsed = 'fake_updated_since_parsed'
deleted = 'fake_deleted'
instance1 = dict(uuid='fake_uuid1', deleted=False)
instance2 = dict(uuid='fake_uuid2', deleted=True)
fake_instances = [instance1, instance2]
self.mox.StubOutWithMock(self.tgt_msg_runner,
'instance_update_at_top')
self.mox.StubOutWithMock(self.tgt_msg_runner,
'instance_destroy_at_top')
self.mox.StubOutWithMock(timeutils, 'parse_isotime')
self.mox.StubOutWithMock(cells_utils, 'get_instances_to_sync')
# Middle cell.
timeutils.parse_isotime(updated_since_raw).AndReturn(
updated_since_parsed)
cells_utils.get_instances_to_sync(self.ctxt,
updated_since=updated_since_parsed,
project_id=project_id,
deleted=deleted).AndReturn([])
# Bottom/Target cell
timeutils.parse_isotime(updated_since_raw).AndReturn(
updated_since_parsed)
cells_utils.get_instances_to_sync(self.ctxt,
updated_since=updated_since_parsed,
project_id=project_id,
deleted=deleted).AndReturn(fake_instances)
self.tgt_msg_runner.instance_update_at_top(self.ctxt, instance1)
self.tgt_msg_runner.instance_destroy_at_top(self.ctxt, instance2)
self.mox.ReplayAll()
self.src_msg_runner.sync_instances(self.ctxt,
project_id, updated_since_raw, deleted)
def test_service_get_all_with_disabled(self):
# Reset this, as this is a broadcast down.
self._setup_attrs(up=False)
ctxt = self.ctxt.elevated()
self.mox.StubOutWithMock(self.src_db_inst, 'service_get_all')
self.mox.StubOutWithMock(self.mid_db_inst, 'service_get_all')
self.mox.StubOutWithMock(self.tgt_db_inst, 'service_get_all')
self.src_db_inst.service_get_all(ctxt,
disabled=None).AndReturn([1, 2])
self.mid_db_inst.service_get_all(ctxt,
disabled=None).AndReturn([3])
self.tgt_db_inst.service_get_all(ctxt,
disabled=None).AndReturn([4, 5])
self.mox.ReplayAll()
responses = self.src_msg_runner.service_get_all(ctxt,
filters={})
response_values = [(resp.cell_name, resp.value_or_raise())
for resp in responses]
expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
('api-cell!child-cell2', [3]),
('api-cell', [1, 2])]
self.assertEqual(expected, response_values)
def test_service_get_all_without_disabled(self):
# Reset this, as this is a broadcast down.
self._setup_attrs(up=False)
disabled = False
filters = {'disabled': disabled}
ctxt = self.ctxt.elevated()
self.mox.StubOutWithMock(self.src_db_inst, 'service_get_all')
self.mox.StubOutWithMock(self.mid_db_inst, 'service_get_all')
self.mox.StubOutWithMock(self.tgt_db_inst, 'service_get_all')
self.src_db_inst.service_get_all(ctxt,
disabled=disabled).AndReturn([1, 2])
self.mid_db_inst.service_get_all(ctxt,
disabled=disabled).AndReturn([3])
self.tgt_db_inst.service_get_all(ctxt,
disabled=disabled).AndReturn([4, 5])
self.mox.ReplayAll()
responses = self.src_msg_runner.service_get_all(ctxt,
filters=filters)
response_values = [(resp.cell_name, resp.value_or_raise())
for resp in responses]
expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
('api-cell!child-cell2', [3]),
('api-cell', [1, 2])]
self.assertEqual(expected, response_values)
def test_task_log_get_all_broadcast(self):
# Reset this, as this is a broadcast down.
self._setup_attrs(up=False)
task_name = 'fake_task_name'
begin = 'fake_begin'
end = 'fake_end'
host = 'fake_host'
state = 'fake_state'
ctxt = self.ctxt.elevated()
self.mox.StubOutWithMock(self.src_db_inst, 'task_log_get_all')
self.mox.StubOutWithMock(self.mid_db_inst, 'task_log_get_all')
self.mox.StubOutWithMock(self.tgt_db_inst, 'task_log_get_all')
self.src_db_inst.task_log_get_all(ctxt, task_name,
begin, end, host=host, state=state).AndReturn([1, 2])
self.mid_db_inst.task_log_get_all(ctxt, task_name,
begin, end, host=host, state=state).AndReturn([3])
self.tgt_db_inst.task_log_get_all(ctxt, task_name,
begin, end, host=host, state=state).AndReturn([4, 5])
self.mox.ReplayAll()
responses = self.src_msg_runner.task_log_get_all(ctxt, None,
task_name, begin, end, host=host, state=state)
response_values = [(resp.cell_name, resp.value_or_raise())
for resp in responses]
expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
('api-cell!child-cell2', [3]),
('api-cell', [1, 2])]
self.assertEqual(expected, response_values)
def test_compute_node_get_all(self):
# Reset this, as this is a broadcast down.
self._setup_attrs(up=False)
ctxt = self.ctxt.elevated()
self.mox.StubOutWithMock(self.src_db_inst, 'compute_node_get_all')
self.mox.StubOutWithMock(self.mid_db_inst, 'compute_node_get_all')
self.mox.StubOutWithMock(self.tgt_db_inst, 'compute_node_get_all')
self.src_db_inst.compute_node_get_all(ctxt).AndReturn([1, 2])
self.mid_db_inst.compute_node_get_all(ctxt).AndReturn([3])
self.tgt_db_inst.compute_node_get_all(ctxt).AndReturn([4, 5])
self.mox.ReplayAll()
responses = self.src_msg_runner.compute_node_get_all(ctxt)
response_values = [(resp.cell_name, resp.value_or_raise())
for resp in responses]
expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
('api-cell!child-cell2', [3]),
('api-cell', [1, 2])]
self.assertEqual(expected, response_values)
def test_compute_node_get_all_with_hyp_match(self):
# Reset this, as this is a broadcast down.
self._setup_attrs(up=False)
hypervisor_match = 'meow'
ctxt = self.ctxt.elevated()
self.mox.StubOutWithMock(self.src_db_inst,
'compute_node_search_by_hypervisor')
self.mox.StubOutWithMock(self.mid_db_inst,
'compute_node_search_by_hypervisor')
self.mox.StubOutWithMock(self.tgt_db_inst,
'compute_node_search_by_hypervisor')
self.src_db_inst.compute_node_search_by_hypervisor(ctxt,
hypervisor_match).AndReturn([1, 2])
self.mid_db_inst.compute_node_search_by_hypervisor(ctxt,
hypervisor_match).AndReturn([3])
self.tgt_db_inst.compute_node_search_by_hypervisor(ctxt,
hypervisor_match).AndReturn([4, 5])
self.mox.ReplayAll()
responses = self.src_msg_runner.compute_node_get_all(ctxt,
hypervisor_match=hypervisor_match)
response_values = [(resp.cell_name, resp.value_or_raise())
for resp in responses]
expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
('api-cell!child-cell2', [3]),
('api-cell', [1, 2])]
self.assertEqual(expected, response_values)
def test_compute_node_stats(self):
# Reset this, as this is a broadcast down.
self._setup_attrs(up=False)
ctxt = self.ctxt.elevated()
self.mox.StubOutWithMock(self.src_db_inst,
'compute_node_statistics')
self.mox.StubOutWithMock(self.mid_db_inst,
'compute_node_statistics')
self.mox.StubOutWithMock(self.tgt_db_inst,
'compute_node_statistics')
self.src_db_inst.compute_node_statistics(ctxt).AndReturn([1, 2])
self.mid_db_inst.compute_node_statistics(ctxt).AndReturn([3])
self.tgt_db_inst.compute_node_statistics(ctxt).AndReturn([4, 5])
self.mox.ReplayAll()
responses = self.src_msg_runner.compute_node_stats(ctxt)
response_values = [(resp.cell_name, resp.value_or_raise())
for resp in responses]
expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
('api-cell!child-cell2', [3]),
('api-cell', [1, 2])]
self.assertEqual(expected, response_values)
def test_consoleauth_delete_tokens(self):
fake_uuid = 'fake-instance-uuid'
# To show these should not be called in src/mid-level cell
self.mox.StubOutWithMock(self.src_ca_rpcapi,
'delete_tokens_for_instance')
self.mox.StubOutWithMock(self.mid_ca_rpcapi,
'delete_tokens_for_instance')
self.mox.StubOutWithMock(self.tgt_ca_rpcapi,
'delete_tokens_for_instance')
self.tgt_ca_rpcapi.delete_tokens_for_instance(self.ctxt, fake_uuid)
self.mox.ReplayAll()
self.src_msg_runner.consoleauth_delete_tokens(self.ctxt, fake_uuid)
def test_bdm_update_or_create_with_none_create(self):
fake_bdm = {'id': 'fake_id',
'volume_id': 'fake_volume_id'}
expected_bdm = fake_bdm.copy()
expected_bdm.pop('id')
# Shouldn't be called for these 2 cells
self.mox.StubOutWithMock(self.src_db_inst,
'block_device_mapping_update_or_create')
self.mox.StubOutWithMock(self.mid_db_inst,
'block_device_mapping_update_or_create')
self.mox.StubOutWithMock(self.tgt_db_inst,
'block_device_mapping_update_or_create')
self.tgt_db_inst.block_device_mapping_update_or_create(
self.ctxt, expected_bdm, legacy=False)
self.mox.ReplayAll()
self.src_msg_runner.bdm_update_or_create_at_top(self.ctxt,
fake_bdm,
create=None)
def test_bdm_update_or_create_with_true_create(self):
fake_bdm = {'id': 'fake_id',
'volume_id': 'fake_volume_id'}
expected_bdm = fake_bdm.copy()
expected_bdm.pop('id')
# Shouldn't be called for these 2 cells
self.mox.StubOutWithMock(self.src_db_inst,
'block_device_mapping_create')
self.mox.StubOutWithMock(self.mid_db_inst,
'block_device_mapping_create')
self.mox.StubOutWithMock(self.tgt_db_inst,
'block_device_mapping_create')
self.tgt_db_inst.block_device_mapping_create(
self.ctxt, fake_bdm, legacy=False)
self.mox.ReplayAll()
self.src_msg_runner.bdm_update_or_create_at_top(self.ctxt,
fake_bdm,
create=True)
def test_bdm_update_or_create_with_false_create_vol_id(self):
fake_bdm = {'id': 'fake_id',
'instance_uuid': 'fake_instance_uuid',
'device_name': 'fake_device_name',
'volume_id': 'fake_volume_id'}
expected_bdm = fake_bdm.copy()
expected_bdm.pop('id')
fake_inst_bdms = [{'id': 1,
'volume_id': 'not-a-match',
'device_name': 'not-a-match'},
{'id': 2,
'volume_id': 'fake_volume_id',
'device_name': 'not-a-match'},
{'id': 3,
'volume_id': 'not-a-match',
'device_name': 'not-a-match'}]
# Shouldn't be called for these 2 cells
self.mox.StubOutWithMock(self.src_db_inst,
'block_device_mapping_update')
self.mox.StubOutWithMock(self.mid_db_inst,
'block_device_mapping_update')
self.mox.StubOutWithMock(self.tgt_db_inst,
'block_device_mapping_get_all_by_instance')
self.mox.StubOutWithMock(self.tgt_db_inst,
'block_device_mapping_update')
self.tgt_db_inst.block_device_mapping_get_all_by_instance(
self.ctxt, 'fake_instance_uuid').AndReturn(
fake_inst_bdms)
# Should try to update ID 2.
self.tgt_db_inst.block_device_mapping_update(
self.ctxt, 2, expected_bdm, legacy=False)
self.mox.ReplayAll()
self.src_msg_runner.bdm_update_or_create_at_top(self.ctxt,
fake_bdm,
create=False)
def test_bdm_update_or_create_with_false_create_dev_name(self):
fake_bdm = {'id': 'fake_id',
'instance_uuid': 'fake_instance_uuid',
'device_name': 'fake_device_name',
'volume_id': 'fake_volume_id'}
expected_bdm = fake_bdm.copy()
expected_bdm.pop('id')
fake_inst_bdms = [{'id': 1,
'volume_id': 'not-a-match',
'device_name': 'not-a-match'},
{'id': 2,
'volume_id': 'not-a-match',
'device_name': 'fake_device_name'},
{'id': 3,
'volume_id': 'not-a-match',
'device_name': 'not-a-match'}]
# Shouldn't be called for these 2 cells
self.mox.StubOutWithMock(self.src_db_inst,
'block_device_mapping_update')
self.mox.StubOutWithMock(self.mid_db_inst,
'block_device_mapping_update')
self.mox.StubOutWithMock(self.tgt_db_inst,
'block_device_mapping_get_all_by_instance')
self.mox.StubOutWithMock(self.tgt_db_inst,
'block_device_mapping_update')
self.tgt_db_inst.block_device_mapping_get_all_by_instance(
self.ctxt, 'fake_instance_uuid').AndReturn(
fake_inst_bdms)
# Should try to update ID 2.
self.tgt_db_inst.block_device_mapping_update(
self.ctxt, 2, expected_bdm, legacy=False)
self.mox.ReplayAll()
self.src_msg_runner.bdm_update_or_create_at_top(self.ctxt,
fake_bdm,
create=False)
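    # The create-flag dispatch covered by the four BDM tests above (added
    # summary):
    #   create=None  -> block_device_mapping_update_or_create()
    #   create=True  -> block_device_mapping_create()
    #   create=False -> fetch the instance's BDMs, match one by volume_id or
    #                   device_name, then block_device_mapping_update()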
def test_bdm_destroy_by_volume(self):
fake_instance_uuid = 'fake-instance-uuid'
fake_volume_id = 'fake-volume-name'
# Shouldn't be called for these 2 cells
self.mox.StubOutWithMock(self.src_db_inst,
'block_device_mapping_destroy_by_instance_and_volume')
self.mox.StubOutWithMock(self.mid_db_inst,
'block_device_mapping_destroy_by_instance_and_volume')
self.mox.StubOutWithMock(self.tgt_db_inst,
'block_device_mapping_destroy_by_instance_and_volume')
self.tgt_db_inst.block_device_mapping_destroy_by_instance_and_volume(
self.ctxt, fake_instance_uuid, fake_volume_id)
self.mox.ReplayAll()
self.src_msg_runner.bdm_destroy_at_top(self.ctxt, fake_instance_uuid,
volume_id=fake_volume_id)
def test_bdm_destroy_by_device(self):
fake_instance_uuid = 'fake-instance-uuid'
fake_device_name = 'fake-device-name'
# Shouldn't be called for these 2 cells
self.mox.StubOutWithMock(self.src_db_inst,
'block_device_mapping_destroy_by_instance_and_device')
self.mox.StubOutWithMock(self.mid_db_inst,
'block_device_mapping_destroy_by_instance_and_device')
self.mox.StubOutWithMock(self.tgt_db_inst,
'block_device_mapping_destroy_by_instance_and_device')
self.tgt_db_inst.block_device_mapping_destroy_by_instance_and_device(
self.ctxt, fake_instance_uuid, fake_device_name)
self.mox.ReplayAll()
self.src_msg_runner.bdm_destroy_at_top(self.ctxt, fake_instance_uuid,
device_name=fake_device_name)
def test_get_migrations(self):
self._setup_attrs(up=False)
filters = {'status': 'confirmed'}
migrations_from_cell1 = [{'id': 123}]
migrations_from_cell2 = [{'id': 456}]
self.mox.StubOutWithMock(self.mid_compute_api,
'get_migrations')
self.mid_compute_api.get_migrations(self.ctxt, filters).\
AndReturn(migrations_from_cell1)
self.mox.StubOutWithMock(self.tgt_compute_api,
'get_migrations')
self.tgt_compute_api.get_migrations(self.ctxt, filters).\
AndReturn(migrations_from_cell2)
self.mox.ReplayAll()
responses = self.src_msg_runner.get_migrations(
self.ctxt,
None, False, filters)
self.assertEqual(2, len(responses))
for response in responses:
self.assertIn(response.value_or_raise(), [migrations_from_cell1,
migrations_from_cell2])
| apache-2.0 |
marckuz/django | tests/admin_changelist/tests.py | 55 | 41903 | from __future__ import unicode_literals
import datetime
from django.contrib import admin
from django.contrib.admin.models import LogEntry
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.templatetags.admin_list import pagination
from django.contrib.admin.tests import AdminSeleniumWebDriverTestCase
from django.contrib.admin.views.main import ALL_VAR, SEARCH_VAR, ChangeList
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.template import Context, Template
from django.test import TestCase, ignore_warnings, override_settings
from django.test.client import RequestFactory
from django.utils import formats, six
from django.utils.deprecation import RemovedInDjango20Warning
from .admin import (
BandAdmin, ChildAdmin, ChordsBandAdmin, ConcertAdmin,
CustomPaginationAdmin, CustomPaginator, DynamicListDisplayChildAdmin,
DynamicListDisplayLinksChildAdmin, DynamicListFilterChildAdmin,
DynamicSearchFieldsChildAdmin, EmptyValueChildAdmin, FilteredChildAdmin,
GroupAdmin, InvitationAdmin, NoListDisplayLinksParentAdmin, ParentAdmin,
QuartetAdmin, SwallowAdmin, site as custom_site,
)
from .models import (
Band, Child, ChordsBand, ChordsMusician, Concert, CustomIdUser, Event,
Genre, Group, Invitation, Membership, Musician, OrderedObject, Parent,
Quartet, Swallow, SwallowOneToOne, UnorderedObject,
)
@override_settings(ROOT_URLCONF="admin_changelist.urls")
class ChangeListTests(TestCase):
def setUp(self):
self.factory = RequestFactory()
def _create_superuser(self, username):
return User.objects.create(username=username, is_superuser=True)
def _mocked_authenticated_request(self, url, user):
request = self.factory.get(url)
request.user = user
return request
def test_select_related_preserved(self):
"""
Regression test for #10348: ChangeList.get_queryset() shouldn't
overwrite a custom select_related provided by ModelAdmin.get_queryset().
"""
m = ChildAdmin(Child, custom_site)
request = self.factory.get('/child/')
list_select_related = m.get_list_select_related(request)
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
self.assertEqual(cl.queryset.query.select_related, {
'parent': {'name': {}}
})
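    # Added for illustration: a hedged sketch of the pattern the test above
    # protects -- a ModelAdmin supplying its own select_related() through
    # get_queryset(). The real admin lives in this app's admin.py; the class
    # below is hypothetical and intentionally never registered.
    #
    #     class SelectRelatedChildAdmin(admin.ModelAdmin):
    #         def get_queryset(self, request):
    #             qs = super(SelectRelatedChildAdmin,
    #                        self).get_queryset(request)
    #             # This custom select_related() is what ChangeList must
    #             # preserve instead of overwriting with its own defaults.
    #             return qs.select_related('parent__name')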
def test_select_related_as_tuple(self):
ia = InvitationAdmin(Invitation, custom_site)
request = self.factory.get('/invitation/')
list_select_related = ia.get_list_select_related(request)
cl = ChangeList(request, Child, ia.list_display, ia.list_display_links,
ia.list_filter, ia.date_hierarchy, ia.search_fields,
list_select_related, ia.list_per_page,
ia.list_max_show_all, ia.list_editable, ia)
self.assertEqual(cl.queryset.query.select_related, {'player': {}})
def test_select_related_as_empty_tuple(self):
ia = InvitationAdmin(Invitation, custom_site)
ia.list_select_related = ()
request = self.factory.get('/invitation/')
list_select_related = ia.get_list_select_related(request)
cl = ChangeList(request, Child, ia.list_display, ia.list_display_links,
ia.list_filter, ia.date_hierarchy, ia.search_fields,
list_select_related, ia.list_per_page,
ia.list_max_show_all, ia.list_editable, ia)
self.assertEqual(cl.queryset.query.select_related, False)
def test_get_select_related_custom_method(self):
class GetListSelectRelatedAdmin(admin.ModelAdmin):
list_display = ('band', 'player')
def get_list_select_related(self, request):
return ('band', 'player')
ia = GetListSelectRelatedAdmin(Invitation, custom_site)
request = self.factory.get('/invitation/')
list_select_related = ia.get_list_select_related(request)
cl = ChangeList(request, Child, ia.list_display, ia.list_display_links,
ia.list_filter, ia.date_hierarchy, ia.search_fields,
list_select_related, ia.list_per_page,
ia.list_max_show_all, ia.list_editable, ia)
self.assertEqual(cl.queryset.query.select_related, {'player': {}, 'band': {}})
def test_result_list_empty_changelist_value(self):
"""
Regression test for #14982: EMPTY_CHANGELIST_VALUE should be honored
for relationship fields
"""
new_child = Child.objects.create(name='name', parent=None)
request = self.factory.get('/child/')
m = ChildAdmin(Child, custom_site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, Child, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = (
'<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th>'
'<td class="field-parent nowrap">-</td></tr></tbody>' % link
)
self.assertNotEqual(table_output.find(row_html), -1,
'Failed to find expected row element: %s' % table_output)
def test_result_list_set_empty_value_display_on_admin_site(self):
"""
Test that empty value display can be set on AdminSite
"""
new_child = Child.objects.create(name='name', parent=None)
request = self.factory.get('/child/')
# Set a new empty display value on AdminSite.
admin.site.empty_value_display = '???'
m = ChildAdmin(Child, admin.site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, Child, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = (
'<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th>'
'<td class="field-parent nowrap">???</td></tr></tbody>' % link
)
self.assertNotEqual(table_output.find(row_html), -1,
'Failed to find expected row element: %s' % table_output)
def test_result_list_set_empty_value_display_in_model_admin(self):
"""
Test that empty value display can be set in ModelAdmin or individual fields.
"""
new_child = Child.objects.create(name='name', parent=None)
request = self.factory.get('/child/')
m = EmptyValueChildAdmin(Child, admin.site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, Child, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = (
'<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th>'
'<td class="field-age_display">&dagger;</td><td class="field-age">-empty-</td></tr></tbody>' % link
)
self.assertNotEqual(table_output.find(row_html), -1,
'Failed to find expected row element: %s' % table_output)
def test_result_list_html(self):
"""
        Verifies that the inclusion tag result_list generates a table with
        default ModelAdmin settings.
"""
new_parent = Parent.objects.create(name='parent')
new_child = Child.objects.create(name='name', parent=new_parent)
request = self.factory.get('/child/')
m = ChildAdmin(Child, custom_site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, Child, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = (
'<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th>'
'<td class="field-parent nowrap">Parent object</td></tr></tbody>' % link
)
self.assertNotEqual(table_output.find(row_html), -1,
'Failed to find expected row element: %s' % table_output)
def test_result_list_editable_html(self):
"""
        Regression tests for #11791: the inclusion tag result_list generates
        a table, and this checks that the items are nested within the table
        element tags.
        Also a regression test for #13599: verifies that, when list_editable
        is enabled, hidden fields are rendered in a div outside the table.
"""
new_parent = Parent.objects.create(name='parent')
new_child = Child.objects.create(name='name', parent=new_parent)
request = self.factory.get('/child/')
m = ChildAdmin(Child, custom_site)
# Test with list_editable fields
m.list_display = ['id', 'name', 'parent']
m.list_display_links = ['id']
m.list_editable = ['name']
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
FormSet = m.get_changelist_formset(request)
cl.formset = FormSet(queryset=cl.result_list)
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
# make sure that hidden fields are in the correct place
hiddenfields_div = (
'<div class="hiddenfields">'
'<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" />'
'</div>'
) % new_child.id
self.assertInHTML(hiddenfields_div, table_output, msg_prefix='Failed to find hidden fields')
# make sure that list editable fields are rendered in divs correctly
editable_name_field = (
'<input name="form-0-name" value="name" class="vTextField" '
'maxlength="30" type="text" id="id_form-0-name" />'
)
self.assertInHTML(
'<td class="field-name">%s</td>' % editable_name_field,
table_output,
msg_prefix='Failed to find "name" list_editable field',
)
def test_result_list_editable(self):
"""
Regression test for #14312: list_editable with pagination
"""
new_parent = Parent.objects.create(name='parent')
for i in range(200):
Child.objects.create(name='name %s' % i, parent=new_parent)
request = self.factory.get('/child/', data={'p': -1}) # Anything outside range
m = ChildAdmin(Child, custom_site)
# Test with list_editable fields
m.list_display = ['id', 'name', 'parent']
m.list_display_links = ['id']
m.list_editable = ['name']
self.assertRaises(IncorrectLookupParameters, lambda:
ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m))
@ignore_warnings(category=RemovedInDjango20Warning)
def test_result_list_with_allow_tags(self):
"""
Test for deprecation of allow_tags attribute
"""
new_parent = Parent.objects.create(name='parent')
for i in range(2):
Child.objects.create(name='name %s' % i, parent=new_parent)
request = self.factory.get('/child/')
m = ChildAdmin(Child, custom_site)
def custom_method(self, obj=None):
return 'Unsafe html <br />'
custom_method.allow_tags = True
# Add custom method with allow_tags attribute
m.custom_method = custom_method
m.list_display = ['id', 'name', 'parent', 'custom_method']
cl = ChangeList(
request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m
)
FormSet = m.get_changelist_formset(request)
cl.formset = FormSet(queryset=cl.result_list)
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
custom_field_html = '<td class="field-custom_method">Unsafe html <br /></td>'
self.assertInHTML(custom_field_html, table_output)
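    # Added note: allow_tags (exercised above) is the deprecated spelling;
    # the replacement is to return markup that is already marked safe, e.g.
    #
    #     from django.utils.html import format_html
    #
    #     def custom_method(self, obj=None):
    #         return format_html('Safe html <br />')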
def test_custom_paginator(self):
new_parent = Parent.objects.create(name='parent')
for i in range(200):
Child.objects.create(name='name %s' % i, parent=new_parent)
request = self.factory.get('/child/')
m = CustomPaginationAdmin(Child, custom_site)
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
self.assertIsInstance(cl.paginator, CustomPaginator)
def test_distinct_for_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. Basic ManyToMany.
"""
blues = Genre.objects.create(name='Blues')
band = Band.objects.create(name='B.B. King Review', nr_of_members=11)
band.genres.add(blues)
band.genres.add(blues)
m = BandAdmin(Band, custom_site)
request = self.factory.get('/band/', data={'genres': blues.pk})
cl = ChangeList(request, Band, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one Group instance
self.assertEqual(cl.result_count, 1)
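    # Why distinct() matters in this and the following ManyToMany tests
    # (added note): filtering on a ManyToMany joins through the m2m (or
    # through) table, so an object related to the filter value by more than
    # one row matches once per join row. Roughly:
    #
    #     SELECT band.* FROM band
    #       JOIN band_genres ON band_genres.band_id = band.id
    #      WHERE band_genres.genre_id = %s
    #
    # Without DISTINCT, each matching join row would repeat the band in the
    # changelist. (Table names here are illustrative.)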
def test_distinct_for_through_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. With an intermediate model.
"""
lead = Musician.objects.create(name='Vox')
band = Group.objects.create(name='The Hype')
Membership.objects.create(group=band, music=lead, role='lead voice')
Membership.objects.create(group=band, music=lead, role='bass player')
m = GroupAdmin(Group, custom_site)
request = self.factory.get('/group/', data={'members': lead.pk})
cl = ChangeList(request, Group, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one Group instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_through_m2m_at_second_level_in_list_filter(self):
"""
When using a ManyToMany in list_filter at the second level behind a
ForeignKey, distinct() must be called and results shouldn't appear more
than once.
"""
lead = Musician.objects.create(name='Vox')
band = Group.objects.create(name='The Hype')
Concert.objects.create(name='Woodstock', group=band)
Membership.objects.create(group=band, music=lead, role='lead voice')
Membership.objects.create(group=band, music=lead, role='bass player')
m = ConcertAdmin(Concert, custom_site)
request = self.factory.get('/concert/', data={'group__members': lead.pk})
cl = ChangeList(request, Concert, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one Concert instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_inherited_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
        results shouldn't appear more than once. The model managed in the
        admin inherits from the one that defines the relationship.
"""
lead = Musician.objects.create(name='John')
four = Quartet.objects.create(name='The Beatles')
Membership.objects.create(group=four, music=lead, role='lead voice')
Membership.objects.create(group=four, music=lead, role='guitar player')
m = QuartetAdmin(Quartet, custom_site)
request = self.factory.get('/quartet/', data={'members': lead.pk})
cl = ChangeList(request, Quartet, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one Quartet instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_m2m_to_inherited_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
        results shouldn't appear more than once. The target of the
        relationship inherits from another model.
"""
lead = ChordsMusician.objects.create(name='Player A')
three = ChordsBand.objects.create(name='The Chords Trio')
Invitation.objects.create(band=three, player=lead, instrument='guitar')
Invitation.objects.create(band=three, player=lead, instrument='bass')
m = ChordsBandAdmin(ChordsBand, custom_site)
request = self.factory.get('/chordsband/', data={'members': lead.pk})
cl = ChangeList(request, ChordsBand, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one ChordsBand instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_non_unique_related_object_in_list_filter(self):
"""
        Regression tests for #15819: if a field listed in list_filter
is a non-unique related object, distinct() must be called.
"""
parent = Parent.objects.create(name='Mary')
# Two children with the same name
Child.objects.create(parent=parent, name='Daniel')
Child.objects.create(parent=parent, name='Daniel')
m = ParentAdmin(Parent, custom_site)
request = self.factory.get('/parent/', data={'child__name': 'Daniel'})
cl = ChangeList(request, Parent, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
# Make sure distinct() was called
self.assertEqual(cl.queryset.count(), 1)
def test_distinct_for_non_unique_related_object_in_search_fields(self):
"""
        Regression tests for #15819: if a field listed in search_fields
is a non-unique related object, distinct() must be called.
"""
parent = Parent.objects.create(name='Mary')
Child.objects.create(parent=parent, name='Danielle')
Child.objects.create(parent=parent, name='Daniel')
m = ParentAdmin(Parent, custom_site)
request = self.factory.get('/parent/', data={SEARCH_VAR: 'daniel'})
cl = ChangeList(request, Parent, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
# Make sure distinct() was called
self.assertEqual(cl.queryset.count(), 1)
def test_distinct_for_many_to_many_at_second_level_in_search_fields(self):
"""
When using a ManyToMany in search_fields at the second level behind a
ForeignKey, distinct() must be called and results shouldn't appear more
than once.
"""
lead = Musician.objects.create(name='Vox')
band = Group.objects.create(name='The Hype')
Concert.objects.create(name='Woodstock', group=band)
Membership.objects.create(group=band, music=lead, role='lead voice')
Membership.objects.create(group=band, music=lead, role='bass player')
m = ConcertAdmin(Concert, custom_site)
request = self.factory.get('/concert/', data={SEARCH_VAR: 'vox'})
cl = ChangeList(request, Concert, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
# There's only one Concert instance
self.assertEqual(cl.queryset.count(), 1)
def test_pagination(self):
"""
        Regression tests for #12893: pagination in the admin's changelist
        doesn't use the queryset set by the ModelAdmin.
"""
parent = Parent.objects.create(name='anything')
for i in range(30):
Child.objects.create(name='name %s' % i, parent=parent)
Child.objects.create(name='filtered %s' % i, parent=parent)
request = self.factory.get('/child/')
# Test default queryset
m = ChildAdmin(Child, custom_site)
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all,
m.list_editable, m)
self.assertEqual(cl.queryset.count(), 60)
self.assertEqual(cl.paginator.count, 60)
self.assertEqual(list(cl.paginator.page_range), [1, 2, 3, 4, 5, 6])
# Test custom queryset
m = FilteredChildAdmin(Child, custom_site)
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all,
m.list_editable, m)
self.assertEqual(cl.queryset.count(), 30)
self.assertEqual(cl.paginator.count, 30)
self.assertEqual(list(cl.paginator.page_range), [1, 2, 3])
def test_computed_list_display_localization(self):
"""
Regression test for #13196: output of functions should be localized
in the changelist.
"""
User.objects.create_superuser(
username='super', email='super@localhost', password='secret')
self.client.login(username='super', password='secret')
event = Event.objects.create(date=datetime.date.today())
response = self.client.get(reverse('admin:admin_changelist_event_changelist'))
self.assertContains(response, formats.localize(event.date))
self.assertNotContains(response, six.text_type(event.date))
def test_dynamic_list_display(self):
"""
Regression tests for #14206: dynamic list_display support.
"""
parent = Parent.objects.create(name='parent')
for i in range(10):
Child.objects.create(name='child %s' % i, parent=parent)
user_noparents = self._create_superuser('noparents')
user_parents = self._create_superuser('parents')
# Test with user 'noparents'
m = custom_site._registry[Child]
request = self._mocked_authenticated_request('/child/', user_noparents)
response = m.changelist_view(request)
self.assertNotContains(response, 'Parent object')
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ['name', 'age'])
self.assertEqual(list_display_links, ['name'])
# Test with user 'parents'
m = DynamicListDisplayChildAdmin(Child, custom_site)
request = self._mocked_authenticated_request('/child/', user_parents)
response = m.changelist_view(request)
self.assertContains(response, 'Parent object')
custom_site.unregister(Child)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ('parent', 'name', 'age'))
self.assertEqual(list_display_links, ['parent'])
# Test default implementation
custom_site.register(Child, ChildAdmin)
m = custom_site._registry[Child]
request = self._mocked_authenticated_request('/child/', user_noparents)
response = m.changelist_view(request)
self.assertContains(response, 'Parent object')
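    # Added for illustration: a hedged sketch of the dynamic hook the test
    # above drives. It mirrors the intent of DynamicListDisplayChildAdmin in
    # this app's admin.py (the real implementation may differ):
    #
    #     class DynamicListDisplayChildAdmin(ChildAdmin):
    #         def get_list_display(self, request):
    #             my_list_display = super(
    #                 DynamicListDisplayChildAdmin,
    #                 self).get_list_display(request)
    #             if request.user.username == 'noparents':
    #                 my_list_display = list(my_list_display)
    #                 my_list_display.remove('parent')
    #             return my_list_display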
def test_show_all(self):
parent = Parent.objects.create(name='anything')
for i in range(30):
Child.objects.create(name='name %s' % i, parent=parent)
Child.objects.create(name='filtered %s' % i, parent=parent)
# Add "show all" parameter to request
request = self.factory.get('/child/', data={ALL_VAR: ''})
# Test valid "show all" request (number of total objects is under max)
m = ChildAdmin(Child, custom_site)
# 200 is the max we'll pass to ChangeList
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, 200, m.list_editable, m)
cl.get_results(request)
self.assertEqual(len(cl.result_list), 60)
# Test invalid "show all" request (number of total objects over max)
# falls back to paginated pages
m = ChildAdmin(Child, custom_site)
# 30 is the max we'll pass to ChangeList for this test
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, 30, m.list_editable, m)
cl.get_results(request)
self.assertEqual(len(cl.result_list), 10)
def test_dynamic_list_display_links(self):
"""
Regression tests for #16257: dynamic list_display_links support.
"""
parent = Parent.objects.create(name='parent')
for i in range(1, 10):
Child.objects.create(id=i, name='child %s' % i, parent=parent, age=i)
m = DynamicListDisplayLinksChildAdmin(Child, custom_site)
superuser = self._create_superuser('superuser')
request = self._mocked_authenticated_request('/child/', superuser)
response = m.changelist_view(request)
for i in range(1, 10):
link = reverse('admin:admin_changelist_child_change', args=(i,))
self.assertContains(response, '<a href="%s">%s</a>' % (link, i))
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ('parent', 'name', 'age'))
self.assertEqual(list_display_links, ['age'])
def test_no_list_display_links(self):
"""#15185 -- Allow no links from the 'change list' view grid."""
p = Parent.objects.create(name='parent')
m = NoListDisplayLinksParentAdmin(Parent, custom_site)
superuser = self._create_superuser('superuser')
request = self._mocked_authenticated_request('/parent/', superuser)
response = m.changelist_view(request)
link = reverse('admin:admin_changelist_parent_change', args=(p.pk,))
self.assertNotContains(response, '<a href="%s">' % link)
def test_tuple_list_display(self):
"""
Regression test for #17128
(ChangeList failing under Python 2.5 after r16319)
"""
swallow = Swallow.objects.create(origin='Africa', load='12.34', speed='22.2')
swallow2 = Swallow.objects.create(origin='Africa', load='12.34', speed='22.2')
swallow_o2o = SwallowOneToOne.objects.create(swallow=swallow2)
model_admin = SwallowAdmin(Swallow, custom_site)
superuser = self._create_superuser('superuser')
request = self._mocked_authenticated_request('/swallow/', superuser)
response = model_admin.changelist_view(request)
# just want to ensure it doesn't blow up during rendering
self.assertContains(response, six.text_type(swallow.origin))
self.assertContains(response, six.text_type(swallow.load))
self.assertContains(response, six.text_type(swallow.speed))
# Reverse one-to-one relations should work.
self.assertContains(response, '<td class="field-swallowonetoone">-</td>')
self.assertContains(response, '<td class="field-swallowonetoone">%s</td>' % swallow_o2o)
def test_deterministic_order_for_unordered_model(self):
"""
Ensure that the primary key is systematically used in the ordering of
the changelist's results to guarantee a deterministic order, even
when the Model doesn't have any default ordering defined.
Refs #17198.
"""
superuser = self._create_superuser('superuser')
for counter in range(1, 51):
UnorderedObject.objects.create(id=counter, bool=True)
class UnorderedObjectAdmin(admin.ModelAdmin):
list_per_page = 10
def check_results_order(ascending=False):
custom_site.register(UnorderedObject, UnorderedObjectAdmin)
model_admin = UnorderedObjectAdmin(UnorderedObject, custom_site)
counter = 0 if ascending else 51
for page in range(0, 5):
request = self._mocked_authenticated_request('/unorderedobject/?p=%s' % page, superuser)
response = model_admin.changelist_view(request)
for result in response.context_data['cl'].result_list:
counter += 1 if ascending else -1
self.assertEqual(result.id, counter)
custom_site.unregister(UnorderedObject)
# When no order is defined at all, everything is ordered by '-pk'.
check_results_order()
# When an order field is defined but multiple records have the same
# value for that field, make sure everything gets ordered by -pk as well.
UnorderedObjectAdmin.ordering = ['bool']
check_results_order()
# When order fields are defined, including the pk itself, use them.
UnorderedObjectAdmin.ordering = ['bool', '-pk']
check_results_order()
UnorderedObjectAdmin.ordering = ['bool', 'pk']
check_results_order(ascending=True)
UnorderedObjectAdmin.ordering = ['-id', 'bool']
check_results_order()
UnorderedObjectAdmin.ordering = ['id', 'bool']
check_results_order(ascending=True)
def test_deterministic_order_for_model_ordered_by_its_manager(self):
"""
Ensure that the primary key is systematically used in the ordering of
the changelist's results to guarantee a deterministic order, even
when the Model has a manager that defines a default ordering.
Refs #17198.
"""
superuser = self._create_superuser('superuser')
for counter in range(1, 51):
OrderedObject.objects.create(id=counter, bool=True, number=counter)
class OrderedObjectAdmin(admin.ModelAdmin):
list_per_page = 10
def check_results_order(ascending=False):
custom_site.register(OrderedObject, OrderedObjectAdmin)
model_admin = OrderedObjectAdmin(OrderedObject, custom_site)
counter = 0 if ascending else 51
for page in range(0, 5):
request = self._mocked_authenticated_request('/orderedobject/?p=%s' % page, superuser)
response = model_admin.changelist_view(request)
for result in response.context_data['cl'].result_list:
counter += 1 if ascending else -1
self.assertEqual(result.id, counter)
custom_site.unregister(OrderedObject)
# When no order is defined at all, use the model's default ordering (i.e. 'number')
check_results_order(ascending=True)
# When an order field is defined but multiple records have the same
# value for that field, make sure everything gets ordered by -pk as well.
OrderedObjectAdmin.ordering = ['bool']
check_results_order()
# When order fields are defined, including the pk itself, use them.
OrderedObjectAdmin.ordering = ['bool', '-pk']
check_results_order()
OrderedObjectAdmin.ordering = ['bool', 'pk']
check_results_order(ascending=True)
OrderedObjectAdmin.ordering = ['-id', 'bool']
check_results_order()
OrderedObjectAdmin.ordering = ['id', 'bool']
check_results_order(ascending=True)
def test_dynamic_list_filter(self):
"""
Regression tests for ticket #17646: dynamic list_filter support.
"""
parent = Parent.objects.create(name='parent')
for i in range(10):
Child.objects.create(name='child %s' % i, parent=parent)
user_noparents = self._create_superuser('noparents')
user_parents = self._create_superuser('parents')
# Test with user 'noparents'
m = DynamicListFilterChildAdmin(Child, custom_site)
request = self._mocked_authenticated_request('/child/', user_noparents)
response = m.changelist_view(request)
self.assertEqual(response.context_data['cl'].list_filter, ['name', 'age'])
# Test with user 'parents'
m = DynamicListFilterChildAdmin(Child, custom_site)
request = self._mocked_authenticated_request('/child/', user_parents)
response = m.changelist_view(request)
self.assertEqual(response.context_data['cl'].list_filter, ('parent', 'name', 'age'))
def test_dynamic_search_fields(self):
child = self._create_superuser('child')
m = DynamicSearchFieldsChildAdmin(Child, custom_site)
request = self._mocked_authenticated_request('/child/', child)
response = m.changelist_view(request)
self.assertEqual(response.context_data['cl'].search_fields, ('name', 'age'))
def test_pagination_page_range(self):
"""
Regression tests for ticket #15653: ensure the number of pages
generated for changelist views is correct.
"""
# instantiating and setting up ChangeList object
m = GroupAdmin(Group, custom_site)
request = self.factory.get('/group/')
cl = ChangeList(request, Group, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
per_page = cl.list_per_page = 10
for page_num, objects_count, expected_page_range in [
(0, per_page, []),
(0, per_page * 2, list(range(2))),
(5, per_page * 11, list(range(11))),
(5, per_page * 12, [0, 1, 2, 3, 4, 5, 6, 7, 8, '.', 10, 11]),
(6, per_page * 12, [0, 1, '.', 3, 4, 5, 6, 7, 8, 9, 10, 11]),
(6, per_page * 13, [0, 1, '.', 3, 4, 5, 6, 7, 8, 9, '.', 11, 12]),
]:
# assuming we have exactly `objects_count` objects
Group.objects.all().delete()
for i in range(objects_count):
Group.objects.create(name='test band')
# setting page number and calculating page range
cl.page_num = page_num
cl.get_results(request)
real_page_range = pagination(cl)['page_range']
self.assertListEqual(
expected_page_range,
list(real_page_range),
)
class AdminLogNodeTestCase(TestCase):
def test_get_admin_log_templatetag_custom_user(self):
"""
Regression test for ticket #20088: admin log depends on User model
having id field as primary key.
The old implementation raised an AttributeError when trying to use
the id field.
"""
context = Context({'user': CustomIdUser()})
template_string = '{% load log %}{% get_admin_log 10 as admin_log for_user user %}'
template = Template(template_string)
# Rendering should be u'' since this templatetag just logs,
# it doesn't render any string.
self.assertEqual(template.render(context), '')
def test_get_admin_log_templatetag_no_user(self):
"""
The {% get_admin_log %} tag should work without specifying a user.
"""
user = User(username='jondoe', password='secret', email='super@example.com')
user.save()
ct = ContentType.objects.get_for_model(User)
LogEntry.objects.log_action(user.pk, ct.pk, user.pk, repr(user), 1)
t = Template(
'{% load log %}'
'{% get_admin_log 100 as admin_log %}'
'{% for entry in admin_log %}'
'{{ entry|safe }}'
'{% endfor %}'
)
self.assertEqual(t.render(Context({})), 'Added "<User: jondoe>".')
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_changelist.urls")
class SeleniumFirefoxTests(AdminSeleniumWebDriverTestCase):
available_apps = ['admin_changelist'] + AdminSeleniumWebDriverTestCase.available_apps
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def setUp(self):
# password = "secret"
User.objects.create(
pk=100, username='super', first_name='Super', last_name='User', email='super@example.com',
password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158', is_active=True, is_superuser=True,
is_staff=True, last_login=datetime.datetime(2007, 5, 30, 13, 20, 10),
date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
def test_add_row_selection(self):
"""
Ensure that the status line for selected rows gets updated correctly (#22038)
"""
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:auth_user_changelist')))
form_id = '#changelist-form'
# Test amount of rows in the Changelist
rows = self.selenium.find_elements_by_css_selector(
'%s #result_list tbody tr' % form_id)
self.assertEqual(len(rows), 1)
# Test current selection
selection_indicator = self.selenium.find_element_by_css_selector(
'%s .action-counter' % form_id)
self.assertEqual(selection_indicator.text, "0 of 1 selected")
# Select a row and check again
row_selector = self.selenium.find_element_by_css_selector(
'%s #result_list tbody tr:first-child .action-select' % form_id)
row_selector.click()
self.assertEqual(selection_indicator.text, "1 of 1 selected")
class SeleniumChromeTests(SeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class SeleniumIETests(SeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
| bsd-3-clause |
WebSpider/headphones | lib/apscheduler/schedulers/background.py | 33 | 1468 | from __future__ import absolute_import
from threading import Thread, Event
from apscheduler.schedulers.base import BaseScheduler
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.util import asbool
class BackgroundScheduler(BlockingScheduler):
"""
A scheduler that runs in the background using a separate thread
(:meth:`~apscheduler.schedulers.base.BaseScheduler.start` will return immediately).
Extra options:
========== ============================================================================================
``daemon`` Set the ``daemon`` option in the background thread (defaults to ``True``,
see `the documentation <https://docs.python.org/3.4/library/threading.html#thread-objects>`_
for further details)
========== ============================================================================================
"""
_thread = None
def _configure(self, config):
self._daemon = asbool(config.pop('daemon', True))
super(BackgroundScheduler, self)._configure(config)
def start(self):
BaseScheduler.start(self)
self._event = Event()
self._thread = Thread(target=self._main_loop, name='APScheduler')
self._thread.daemon = self._daemon
self._thread.start()
def shutdown(self, wait=True):
super(BackgroundScheduler, self).shutdown(wait)
self._thread.join()
del self._thread
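# Minimal usage sketch (illustrative only; ``tick`` and the 3-second interval
# are made up for this example):
#
#   from apscheduler.schedulers.background import BackgroundScheduler
#
#   def tick():
#       print('Tick!')
#
#   scheduler = BackgroundScheduler()
#   scheduler.add_job(tick, 'interval', seconds=3)
#   scheduler.start()   # returns immediately; jobs run in the scheduler thread
#   # ... do other work ...
#   scheduler.shutdown()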
| gpl-3.0 |
Ramalus/kivy | examples/widgets/effectwidget3_advanced.py | 43 | 1665 | '''
This example demonstrates creating and using an AdvancedEffectBase. In
this case, we use it to efficiently pass the touch coordinates into the shader.
'''
from kivy.base import runTouchApp
from kivy.properties import ListProperty
from kivy.lang import Builder
from kivy.uix.effectwidget import EffectWidget, AdvancedEffectBase
effect_string = '''
uniform vec2 touch;
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{
vec2 distance = 0.025*(coords - touch);
float dist_mag = (distance.x*distance.x + distance.y*distance.y);
vec3 multiplier = vec3(abs(sin(dist_mag - time)));
return vec4(multiplier * color.xyz, 1.0);
}
'''
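# How the touch reaches the shader (a sketch of the mechanism, as documented
# for AdvancedEffectBase): every entry of self.uniforms is uploaded to the
# compiled shader as a uniform of the same name, so writing
# self.uniforms['touch'] in on_touch() below feeds the ``uniform vec2 touch``
# declared at the top of effect_string.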
class TouchEffect(AdvancedEffectBase):
touch = ListProperty([0.0, 0.0])
def __init__(self, *args, **kwargs):
super(TouchEffect, self).__init__(*args, **kwargs)
self.glsl = effect_string
self.uniforms = {'touch': [0.0, 0.0]}
def on_touch(self, *args, **kwargs):
self.uniforms['touch'] = [float(i) for i in self.touch]
class TouchWidget(EffectWidget):
def __init__(self, *args, **kwargs):
super(TouchWidget, self).__init__(*args, **kwargs)
self.effect = TouchEffect()
self.effects = [self.effect]
def on_touch_down(self, touch):
super(TouchWidget, self).on_touch_down(touch)
self.on_touch_move(touch)
def on_touch_move(self, touch):
self.effect.touch = touch.pos
root = Builder.load_string('''
TouchWidget:
Button:
text: 'Some text!'
Image:
source: 'data/logo/kivy-icon-512.png'
allow_stretch: True
keep_ratio: False
''')
runTouchApp(root)
| mit |
vicky2135/lucious | oscar/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/hebrewprober.py | 2929 | 13359 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Shy Shalom
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe, eDetecting
from .compat import wrap_ord
# This prober doesn't actually recognize a language or a charset.
# It is a helper prober for the use of the Hebrew model probers
### General ideas of the Hebrew charset recognition ###
#
# Four main charsets exist in Hebrew:
# "ISO-8859-8" - Visual Hebrew
# "windows-1255" - Logical Hebrew
# "ISO-8859-8-I" - Logical Hebrew
# "x-mac-hebrew" - ?? Logical Hebrew ??
#
# Both "ISO" charsets use a completely identical set of code points, whereas
# "windows-1255" and "x-mac-hebrew" are two different proper supersets of
# these code points. windows-1255 defines additional characters in the range
# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific
# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6.
# x-mac-hebrew defines similar additional code points but with a different
# mapping.
#
# As far as an average Hebrew text with no diacritics is concerned, all four
# charsets are identical with respect to code points. Meaning that for the
# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters
# (including final letters).
#
# The dominant difference between these charsets is their directionality.
# "Visual" directionality means that the text is ordered as if the renderer is
# not aware of a BIDI rendering algorithm. The renderer sees the text and
# draws it from left to right. The text itself when ordered naturally is read
# backwards. A buffer of Visual Hebrew generally looks like so:
# "[last word of first line spelled backwards] [whole line ordered backwards
# and spelled backwards] [first word of first line spelled backwards]
# [end of line] [last word of second line] ... etc' "
# adding punctuation marks, numbers and English text to visual text is
# naturally also "visual" and from left to right.
#
# "Logical" directionality means the text is ordered "naturally" according to
# the order it is read. It is the responsibility of the renderer to display
# the text from right to left. A BIDI algorithm is used to place general
# punctuation marks, numbers and English text in the text.
#
# Texts in x-mac-hebrew are almost impossible to find on the Internet. From
# what little evidence I could find, it seems that its general directionality
# is Logical.
#
# To sum up all of the above, the Hebrew probing mechanism knows about two
# charsets:
# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are
# backwards while line order is natural. For charset recognition purposes
# the line order is unimportant (In fact, for this implementation, even
# word order is unimportant).
# Logical Hebrew - "windows-1255" - normal, naturally ordered text.
#
# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be
# specifically identified.
# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew
# that contain special punctuation marks or diacritics is displayed with
# some unconverted characters showing as question marks. This problem might
# be corrected using another model prober for x-mac-hebrew. Due to the fact
# that x-mac-hebrew texts are so rare, writing another model prober isn't
# worth the effort and performance hit.
#
#### The Prober ####
#
# The prober is divided between two SBCharSetProbers and a HebrewProber,
# all of which are managed, created, fed data, inquired and deleted by the
# SBCSGroupProber. The two SBCharSetProbers identify that the text is in
# fact some kind of Hebrew, Logical or Visual. The final decision about which
# one is it is made by the HebrewProber by combining final-letter scores
# with the scores of the two SBCharSetProbers to produce a final answer.
#
# The SBCSGroupProber is responsible for stripping the original text of HTML
# tags, English characters, numbers, low-ASCII punctuation characters, spaces
# and new lines. It reduces any sequence of such characters to a single space.
# The buffer fed to each prober in the SBCS group prober is pure text in
# high-ASCII.
# The two SBCharSetProbers (model probers) share the same language model:
# Win1255Model.
# The first SBCharSetProber uses the model normally as any other
# SBCharSetProber does, to recognize windows-1255, upon which this model was
# built. The second SBCharSetProber is told to make the pair-of-letter
# lookup in the language model backwards. This in practice exactly simulates
# a visual Hebrew model using the windows-1255 logical Hebrew model.
#
# The HebrewProber is not using any language model. All it does is look for
# final-letter evidence suggesting the text is either logical Hebrew or visual
# Hebrew. Disjointed from the model probers, the results of the HebrewProber
# alone are meaningless. HebrewProber always returns 0.00 as confidence
# since it never identifies a charset by itself. Instead, the pointer to the
# HebrewProber is passed to the model probers as a helper "Name Prober".
# When the Group prober receives a positive identification from any prober,
# it asks for the name of the charset identified. If the prober queried is a
# Hebrew model prober, the model prober forwards the call to the
# HebrewProber to make the final decision. In the HebrewProber, the
# decision is made according to the final-letters scores maintained and Both
# model probers scores. The answer is returned in the form of the name of the
# charset identified, either "windows-1255" or "ISO-8859-8".
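# A wiring sketch (an assumption about what the group prober does; the exact
# class and model names live in sbcsgroupprober.py):
#
#   hebrew_prober = HebrewProber()
#   logical_prober = SBCharSetProber(Win1255Model, False, hebrew_prober)
#   visual_prober = SBCharSetProber(Win1255Model, True, hebrew_prober)
#   hebrew_prober.set_model_probers(logical_prober, visual_prober)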
# windows-1255 / ISO-8859-8 code points of interest
FINAL_KAF = 0xea
NORMAL_KAF = 0xeb
FINAL_MEM = 0xed
NORMAL_MEM = 0xee
FINAL_NUN = 0xef
NORMAL_NUN = 0xf0
FINAL_PE = 0xf3
NORMAL_PE = 0xf4
FINAL_TSADI = 0xf5
NORMAL_TSADI = 0xf6
# Minimum Visual vs Logical final letter score difference.
# If the difference is below this, don't rely solely on the final letter score
# distance.
MIN_FINAL_CHAR_DISTANCE = 5
# Minimum Visual vs Logical model score difference.
# If the difference is below this, don't rely at all on the model score
# distance.
MIN_MODEL_DISTANCE = 0.01
VISUAL_HEBREW_NAME = "ISO-8859-8"
LOGICAL_HEBREW_NAME = "windows-1255"
class HebrewProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mLogicalProber = None
self._mVisualProber = None
self.reset()
def reset(self):
self._mFinalCharLogicalScore = 0
self._mFinalCharVisualScore = 0
# The two last characters seen in the previous buffer,
# mPrev and mBeforePrev are initialized to space in order to simulate
# a word delimiter at the beginning of the data
self._mPrev = ' '
self._mBeforePrev = ' '
# These probers are owned by the group prober.
def set_model_probers(self, logicalProber, visualProber):
self._mLogicalProber = logicalProber
self._mVisualProber = visualProber
def is_final(self, c):
return wrap_ord(c) in [FINAL_KAF, FINAL_MEM, FINAL_NUN, FINAL_PE,
FINAL_TSADI]
def is_non_final(self, c):
# The normal Tsadi is not a good Non-Final letter due to words like
# 'lechotet' (to chat) containing an apostrophe after the tsadi. This
# apostrophe is converted to a space in FilterWithoutEnglishLetters
# causing the Non-Final tsadi to appear at an end of a word even
# though this is not the case in the original text.
# The letters Pe and Kaf rarely display a related behavior of not being
# a good Non-Final letter. Words like 'Pop', 'Winamp' and 'Mubarak'
# for example legally end with a Non-Final Pe or Kaf. However, the
# benefit of these letters as Non-Final letters outweighs the damage
# since these words are quite rare.
return wrap_ord(c) in [NORMAL_KAF, NORMAL_MEM, NORMAL_NUN, NORMAL_PE]
def feed(self, aBuf):
# Final letter analysis for logical-visual decision.
# Look for evidence that the received buffer is either logical Hebrew
# or visual Hebrew.
# The following cases are checked:
# 1) A word longer than 1 letter, ending with a final letter. This is
# an indication that the text is laid out "naturally" since the
# final letter really appears at the end. +1 for logical score.
# 2) A word longer than 1 letter, ending with a Non-Final letter. In
# normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi,
# should not end with the Non-Final form of that letter. Exceptions
# to this rule are mentioned above in isNonFinal(). This is an
# indication that the text is laid out backwards. +1 for visual
# score
# 3) A word longer than 1 letter, starting with a final letter. Final
# letters should not appear at the beginning of a word. This is an
# indication that the text is laid out backwards. +1 for visual
# score.
#
# The visual score and logical score are accumulated throughout the
# text and are finally checked against each other in GetCharSetName().
# No checking for final letters in the middle of words is done since
# that case is not an indication for either Logical or Visual text.
#
# We automatically filter out all 7-bit characters (replace them with
# spaces) so the word boundary detection works properly. [MAP]
if self.get_state() == eNotMe:
# Both model probers say it's not them. No reason to continue.
return eNotMe
aBuf = self.filter_high_bit_only(aBuf)
for cur in aBuf:
if cur == ' ':
# We stand on a space - a word just ended
if self._mBeforePrev != ' ':
# next-to-last char was not a space so self._mPrev is not a
# 1 letter word
if self.is_final(self._mPrev):
# case (1) [-2:not space][-1:final letter][cur:space]
self._mFinalCharLogicalScore += 1
elif self.is_non_final(self._mPrev):
# case (2) [-2:not space][-1:Non-Final letter][
# cur:space]
self._mFinalCharVisualScore += 1
else:
# Not standing on a space
if ((self._mBeforePrev == ' ') and
(self.is_final(self._mPrev)) and (cur != ' ')):
# case (3) [-2:space][-1:final letter][cur:not space]
self._mFinalCharVisualScore += 1
self._mBeforePrev = self._mPrev
self._mPrev = cur
# Forever detecting, till the end or until both model probers return
# eNotMe (handled above)
return eDetecting
def get_charset_name(self):
# Make the decision: is it Logical or Visual?
# If the final letter score distance is dominant enough, rely on it.
finalsub = self._mFinalCharLogicalScore - self._mFinalCharVisualScore
if finalsub >= MIN_FINAL_CHAR_DISTANCE:
return LOGICAL_HEBREW_NAME
if finalsub <= -MIN_FINAL_CHAR_DISTANCE:
return VISUAL_HEBREW_NAME
# It's not dominant enough, try to rely on the model scores instead.
modelsub = (self._mLogicalProber.get_confidence()
- self._mVisualProber.get_confidence())
if modelsub > MIN_MODEL_DISTANCE:
return LOGICAL_HEBREW_NAME
if modelsub < -MIN_MODEL_DISTANCE:
return VISUAL_HEBREW_NAME
# Still no good, back to final letter distance, maybe it'll save the
# day.
if finalsub < 0.0:
return VISUAL_HEBREW_NAME
# (finalsub > 0 - Logical) or (don't know what to do) default to
# Logical.
return LOGICAL_HEBREW_NAME
def get_state(self):
# Remain active as long as any of the model probers are active.
if (self._mLogicalProber.get_state() == eNotMe) and \
(self._mVisualProber.get_state() == eNotMe):
return eNotMe
return eDetecting
| bsd-3-clause |
pataquets/phantomjs | src/qt/qtbase/src/3rdparty/freetype/src/tools/chktrcmp.py | 381 | 3826 | #!/usr/bin/env python
#
# Check trace components in FreeType 2 source.
# Author: suzuki toshiya, 2009
#
# This code is explicitly placed into the public domain.
import sys
import os
import re
SRC_FILE_LIST = []
USED_COMPONENT = {}
KNOWN_COMPONENT = {}
SRC_FILE_DIRS = [ "src" ]
TRACE_DEF_FILES = [ "include/freetype/internal/fttrace.h" ]
# --------------------------------------------------------------
# Parse command line options
#
for i in range( 1, len( sys.argv ) ):
if sys.argv[i].startswith( "--help" ):
print "Usage: %s [option]" % sys.argv[0]
print "Search used-but-defined and defined-but-not-used trace_XXX macros"
print ""
print " --help:"
print " Show this help"
print ""
print " --src-dirs=dir1:dir2:..."
print " Specify the directories of C source files to be checked"
print " Default is %s" % ":".join( SRC_FILE_DIRS )
print ""
print " --def-files=file1:file2:..."
print " Specify the header files including FT_TRACE_DEF()"
print " Default is %s" % ":".join( TRACE_DEF_FILES )
print ""
exit(0)
if sys.argv[i].startswith( "--src-dirs=" ):
SRC_FILE_DIRS = sys.argv[i].replace( "--src-dirs=", "", 1 ).split( ":" )
elif sys.argv[i].startswith( "--def-files=" ):
TRACE_DEF_FILES = sys.argv[i].replace( "--def-files=", "", 1 ).split( ":" )
# --------------------------------------------------------------
# Scan C source and header files using trace macros.
#
c_pathname_pat = re.compile( '^.*\.[ch]$', re.IGNORECASE )
trace_use_pat = re.compile( '^[ \t]*#define[ \t]+FT_COMPONENT[ \t]+trace_' )
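# Example of a source line this pattern is meant to catch (illustrative):
#   #define FT_COMPONENT trace_memory
# which yields the component name "memory" once the prefix is stripped.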
for d in SRC_FILE_DIRS:
for ( p, dlst, flst ) in os.walk( d ):
for f in flst:
if c_pathname_pat.match( f ) != None:
src_pathname = os.path.join( p, f )
line_num = 0
for src_line in open( src_pathname, 'r' ):
line_num = line_num + 1
src_line = src_line.strip()
if trace_use_pat.match( src_line ) != None:
component_name = trace_use_pat.sub( '', src_line )
if component_name in USED_COMPONENT:
USED_COMPONENT[component_name].append( "%s:%d" % ( src_pathname, line_num ) )
else:
USED_COMPONENT[component_name] = [ "%s:%d" % ( src_pathname, line_num ) ]
# --------------------------------------------------------------
# Scan header file(s) defining trace macros.
#
trace_def_pat_opn = re.compile( '^.*FT_TRACE_DEF[ \t]*\([ \t]*' )
trace_def_pat_cls = re.compile( '[ \t\)].*$' )
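# Example of a definition line these two patterns extract a name from
# (illustrative):
#   FT_TRACE_DEF( memory )
# the opening pattern strips everything up to "FT_TRACE_DEF( ", the closing
# pattern strips the trailing " )", leaving the component name "memory".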
for f in TRACE_DEF_FILES:
line_num = 0
for hdr_line in open( f, 'r' ):
line_num = line_num + 1
hdr_line = hdr_line.strip()
if trace_def_pat_opn.match( hdr_line ) != None:
component_name = trace_def_pat_opn.sub( '', hdr_line )
component_name = trace_def_pat_cls.sub( '', component_name )
if component_name in KNOWN_COMPONENT:
print "trace component %s is defined twice, see %s and fttrace.h:%d" % \
( component_name, KNOWN_COMPONENT[component_name], line_num )
else:
KNOWN_COMPONENT[component_name] = "%s:%d" % \
( os.path.basename( f ), line_num )
# --------------------------------------------------------------
# Compare the used and defined trace macros.
#
print "# Trace component used in the implementations but not defined in fttrace.h."
cmpnt = USED_COMPONENT.keys()
cmpnt.sort()
for c in cmpnt:
if c not in KNOWN_COMPONENT:
print "Trace component %s (used in %s) is not defined." % ( c, ", ".join( USED_COMPONENT[c] ) )
print "# Trace component is defined but not used in the implementations."
cmpnt = KNOWN_COMPONENT.keys()
cmpnt.sort()
for c in cmpnt:
if c not in USED_COMPONENT:
if c != "any":
print "Trace component %s (defined in %s) is not used." % ( c, KNOWN_COMPONENT[c] )
| bsd-3-clause |
meteorcloudy/tensorflow | tensorflow/contrib/mixed_precision/python/loss_scale_manager.py | 12 | 7768 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""LossScaleManager classes for mixed precision training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
@six.add_metaclass(abc.ABCMeta)
class LossScaleManager(object):
"""Abstract loss scale manager class.
Loss scale managers with a different strategy should subclass this class.
Loss scaling is a process that:
1) Applies a multiplier on the loss before computing gradients, and
2) Applies the reciprocal of the multiplier on the gradients before they are
applied on variables.
This class is used together with
@{tf.contrib.mixed_precision.LossScaleOptimizer} for mixed precision training
(float32 variables and float16 ops) on Nvidia GPUs in order to achieve the
same model quality as single precision training, with the benefits of
potential higher throughput.
See @{tf.contrib.mixed_precision.LossScaleOptimizer} for more details.
"""
@abc.abstractmethod
def get_loss_scale(self):
"""Returns the loss scale as a scalar `float32` tensor."""
pass
@abc.abstractmethod
def update_loss_scale(self, finite_grads):
"""Updates loss scale based on if gradients are finite in current step.
Args:
finite_grads: bool scalar tensor indicating if all gradients are
finite (i.e., not inf or nan).
Returns:
An op that, when executed, updates the loss scale. If eager execution is
enabled, nothing is returned.
"""
del finite_grads
return
class FixedLossScaleManager(LossScaleManager):
"""Loss scale manager with a fixed loss scale.
The loss scale is not updated for the lifetime of the class.
"""
def __init__(self, loss_scale):
"""Creates the fixed loss scale manager.
Args:
loss_scale: A Python float. Its ideal value varies from model to model.
Choosing too small a loss_scale might affect model quality; too big a
loss_scale might cause inf or nan. There is no single right
loss_scale to apply. There is no harm in choosing a relatively big number
as long as no nan or inf is encountered in training.
Raises:
ValueError: If loss_scale is less than 1.
"""
if loss_scale < 1:
raise ValueError("loss scale must be at least 1.")
self._loss_scale = ops.convert_to_tensor(loss_scale, dtype=dtypes.float32)
def get_loss_scale(self):
return self._loss_scale
def update_loss_scale(self, finite_grads):
del finite_grads
return gen_control_flow_ops.no_op()
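# Minimal usage sketch (an assumption about the intended pairing, following
# the LossScaleManager docstring above; the scale value 128. is illustrative):
#
#   loss_scale_manager = FixedLossScaleManager(128.)
#   opt = tf.contrib.mixed_precision.LossScaleOptimizer(
#       tf.train.GradientDescentOptimizer(learning_rate=0.01),
#       loss_scale_manager)
#   train_op = opt.minimize(loss)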
class ExponentialUpdateLossScaleManager(LossScaleManager):
"""Loss scale manager uses an exponential update strategy.
In general, the strategy increases the loss scale by a greater-than-one
factor after encountering a consecutive series of steps with finite
gradients; similarly, it decreases the loss scale by a factor once enough
steps with non-finite (nan or inf) gradients have accumulated. An update is not
applied if its result is less than 1 or overflows the float32 dynamic range.
The number of finite and non-finite steps are cleared every time the loss
scale is changed. The condition to decrease the loss scale is looser than to
increase it since the former does not require the steps to be consecutive.
"""
def __init__(self,
init_loss_scale,
incr_every_n_steps,
decr_every_n_nan_or_inf=2,
incr_ratio=2,
decr_ratio=0.8):
"""Constructor of exponential-update loss scale manager.
Args:
init_loss_scale: A Python float. The loss scale to use at the beginning.
incr_every_n_steps: Increases loss scale every n consecutive steps with
finite gradients.
decr_every_n_nan_or_inf: Decreases loss scale every n accumulated steps
with nan or inf gradients.
incr_ratio: The multiplier to use when increasing the loss scale.
decr_ratio: The less-than-one-multiplier to use when decreasing the loss
scale.
"""
self._incr_every_n_steps = incr_every_n_steps
self._decr_every_n_nan_or_inf = decr_every_n_nan_or_inf
self._incr_ratio = incr_ratio
self._decr_ratio = decr_ratio
self._loss_scale = variable_scope.variable(
name="loss_scale",
initial_value=ops.convert_to_tensor(init_loss_scale, dtypes.float32),
dtype=dtypes.float32,
trainable=False)
self._num_good_steps = variable_scope.variable(
name="good_steps", initial_value=0, dtype=dtypes.int32, trainable=False)
self._num_bad_steps = variable_scope.variable(
name="bad_steps", initial_value=0, dtype=dtypes.int32, trainable=False)
def _reset_stats(self):
return control_flow_ops.group(
state_ops.assign(self._num_good_steps, 0),
state_ops.assign(self._num_bad_steps, 0))
def get_loss_scale(self):
"""Returns the loss scale."""
return self._loss_scale
def update_loss_scale(self, finite_grads):
"""Updates loss scale based on if gradients are finite in current step."""
def update_if_finite_grads():
"""Branch function when grads are all finite."""
def incr_loss_scale():
new_loss_scale = control_flow_ops.cond(
gen_math_ops.is_finite(self._loss_scale * self._incr_ratio),
lambda: self._loss_scale * self._incr_ratio,
lambda: self._loss_scale)
update_op = state_ops.assign(self._loss_scale, new_loss_scale)
# When loss_scale is updated, both good and bad steps are reset.
return control_flow_ops.group(update_op, self._reset_stats())
return control_flow_ops.cond(
self._num_good_steps + 1 >= self._incr_every_n_steps,
incr_loss_scale,
lambda: state_ops.assign_add(self._num_good_steps, 1).op)
def update_if_not_finite_grads():
"""Branch function when any grad is not finite."""
def decr_loss_scale():
update_op = state_ops.assign(
self._loss_scale,
gen_math_ops.maximum(1., self._loss_scale * self._decr_ratio))
# When loss_scale is updated, both good and bad steps are reset.
return control_flow_ops.group(update_op, self._reset_stats())
def just_update_steps():
# When bad_steps is incremented, good_step is reset.
return control_flow_ops.group(
state_ops.assign_add(self._num_bad_steps, 1),
state_ops.assign(self._num_good_steps, 0))
return control_flow_ops.cond(
self._num_bad_steps + 1 >= self._decr_every_n_nan_or_inf,
decr_loss_scale, just_update_steps)
return control_flow_ops.cond(finite_grads, update_if_finite_grads,
update_if_not_finite_grads)
| apache-2.0 |
deepinsight/Deformable-ConvNets | deeplab/function/test_deeplab.py | 1 | 3137 | # --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2016 by Contributors
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Modified by Zheng Zhang
# --------------------------------------------------------
import argparse
import pprint
import logging
import time
import os
import mxnet as mx
from config.config import config, generate_config, update_config
from config.dataset_conf import dataset
from config.network_conf import network
from symbols import *
from dataset import *
from core.loader import TestDataLoader
from core.tester import Predictor, pred_eval
from utils.load_model import load_param
def test_deeplab(network, dataset, image_set, root_path, dataset_path,
ctx, prefix, epoch,
vis, logger=None, output_path=None):
if not logger:
assert False, 'require a logger'
# print config
pprint.pprint(config)
logger.info('testing config:{}\n'.format(pprint.pformat(config)))
# load symbol and testing data
sym = eval('get_' + network + '_test')(num_classes=config.dataset.NUM_CLASSES)
imdb = eval(dataset)(image_set, root_path, dataset_path, result_path=output_path)
segdb = imdb.gt_segdb()
# get test data iter
test_data = TestDataLoader(segdb, batch_size=len(ctx))
# load model
# arg_params, aux_params = load_param(prefix, epoch, convert=True, ctx=ctx, process=True)
arg_params, aux_params = load_param(prefix, epoch, process=True)
# infer shape
data_shape_dict = dict(test_data.provide_data_single)
arg_shape, _, aux_shape = sym.infer_shape(**data_shape_dict)
arg_shape_dict = dict(zip(sym.list_arguments(), arg_shape))
aux_shape_dict = dict(zip(sym.list_auxiliary_states(), aux_shape))
# check parameters
for k in sym.list_arguments():
if k in data_shape_dict or k in ['softmax_label']:
continue
assert k in arg_params, k + ' not initialized'
assert arg_params[k].shape == arg_shape_dict[k], \
'shape inconsistent for ' + k + ' inferred ' + str(arg_shape_dict[k]) + ' provided ' + str(arg_params[k].shape)
for k in sym.list_auxiliary_states():
assert k in aux_params, k + ' not initialized'
assert aux_params[k].shape == aux_shape_dict[k], \
'shape inconsistent for ' + k + ' inferred ' + str(aux_shape_dict[k]) + ' provided ' + str(aux_params[k].shape)
# decide maximum shape
data_names = [k[0] for k in test_data.provide_data_single]
label_names = ['softmax_label']
max_data_shape = [[('data', (1, 3, max([v[0] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]]
# create predictor
predictor = Predictor(sym, data_names, label_names,
context=ctx, max_data_shapes=max_data_shape,
provide_data=test_data.provide_data, provide_label=test_data.provide_label,
arg_params=arg_params, aux_params=aux_params)
# start detection
pred_eval(predictor, test_data, imdb, vis=vis, logger=logger)
| apache-2.0 |
zerobatu/edx-platform | lms/djangoapps/certificates/migrations/0013_auto__add_field_generatedcertificate_error_reason.py | 188 | 5642 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'GeneratedCertificate.error_reason'
db.add_column('certificates_generatedcertificate', 'error_reason',
self.gf('django.db.models.fields.CharField')(default='', max_length=512, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'GeneratedCertificate.error_reason'
db.delete_column('certificates_generatedcertificate', 'error_reason')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'certificates.generatedcertificate': {
'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'GeneratedCertificate'},
'course_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
'distinction': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'download_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'download_uuid': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'error_reason': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'grade': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'unavailable'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'verify_uuid': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['certificates']
| agpl-3.0 |
rizar/attention-lvcsr | libs/Theano/theano/tensor/tests/test_keepdims.py | 3 | 3963 | import unittest
import numpy
import theano
from theano import tensor, function
from theano.tests.unittest_tools import attr
# this tests other ops to ensure they keep the dimensions of their
# inputs correctly
class TestKeepDims(unittest.TestCase):
def makeKeepDims_local(self, x, y, axis):
if axis is None:
newaxis = list(range(x.ndim))
elif isinstance(axis, int):
if axis < 0:
newaxis = [axis + x.type.ndim]
else:
newaxis = [axis]
else:
newaxis = []
for a in axis:
if a < 0:
a += x.type.ndim
newaxis.append(a)
i = 0
new_dims = []
for j, _ in enumerate(x.shape):
if j in newaxis:
new_dims.append('x')
else:
new_dims.append(i)
i += 1
return tensor.DimShuffle(y.type.broadcastable, new_dims)(y)
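# Worked example (illustrative): for x.ndim == 3 and axis == [1], the
# reduced y = op(x, axis=1) has ndim 2 and new_dims == [0, 'x', 1], i.e.
# the reduced axis is re-inserted as a broadcastable dimension -- exactly
# what keepdims=True produces.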
@attr('slow')
def test_keepdims(self):
x = tensor.dtensor3()
a = numpy.random.rand(3, 2, 4)
# We don't need to test all opt and C code, as this is tested
# by the ops tests.
mode = theano.compile.Mode(optimizer="fast_compile", linker="py")
# 'max_and_argmax' has two outputs and can be specified with either
# a single or every axis:
for axis in [0, 1, 2, [0], [1], [2], None, [0, 1, 2],
[-1], [-2], [-3], [-1, -2, -3], [0, -1, -2],
[-2, -3, 2]]:
op = tensor.max_and_argmax
f = function([x], [op(x, axis=axis, keepdims=True)[0],
self.makeKeepDims_local(
x, op(x, axis=axis, keepdims=False)[0],
axis)],
mode=mode)
ans1, ans2 = f(a)
assert numpy.allclose(ans1, ans2)
assert ans1.shape == ans2.shape
f = function([x], [op(x, axis=axis, keepdims=True)[1],
self.makeKeepDims_local(
x, op(x, axis=axis, keepdims=False)[1],
axis)],
mode=mode)
ans1, ans2 = f(a)
assert numpy.allclose(ans1, ans2)
assert ans1.shape == ans2.shape
# the following ops can be specified with either a single axis or every
# axis:
for op in ([tensor.argmax, tensor.argmin]):
for axis in [0, 1, 2, [0], [1], [2], None, [0, 1, 2],
[-1], [-2], [-3], [-1, -2, -3], [0, -2, 2]]:
f = function([x], [op(x, axis=axis, keepdims=True),
self.makeKeepDims_local(
x, op(x, axis=axis, keepdims=False),
axis)],
mode=mode)
ans1, ans2 = f(a)
assert numpy.allclose(ans1, ans2)
assert ans1.shape == ans2.shape
# the following ops can be specified with a freely specified axis
# parameter
for op in ([tensor.sum, tensor.prod, tensor.mean, tensor.var,
tensor.std, tensor.all, tensor.any,
tensor.max, tensor.min]):
for axis in [0, 1, 2, [0], [1], [2], None,
[0, 1], [1, 2], [0, 1, 2],
[-1], [-2], [-3], [-1, -2], [-1, -2, -3], [0, -2, 2]]:
f = function([x], [op(x, axis=axis, keepdims=True),
self.makeKeepDims_local(
x, op(x, axis=axis, keepdims=False),
axis)],
mode=mode)
ans1, ans2 = f(a)
assert numpy.allclose(ans1, ans2)
assert ans1.shape == ans2.shape
| mit |
squarerootfury/distrochooser | backend/distrochooser/migrations/0017_auto_20190406_1159.py | 2 | 1484 | # Generated by Django 2.1.2 on 2019-04-06 09:59
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('distrochooser', '0016_auto_20190331_1356'),
]
operations = [
migrations.CreateModel(
name='AnswerDistributionMatrix',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('isBlockingHit', models.BooleanField(default=False)),
('description', models.CharField(default='', max_length=300)),
('answer', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='distrochooser.Answer')),
('blockedAnswers', models.ManyToManyField(related_name='blockedAnswers', to='distrochooser.Answer')),
('distro', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='distrochooser.Distribution')),
],
),
migrations.AlterField(
model_name='usersession',
name='dateTime',
field=models.DateTimeField(default=datetime.datetime(2019, 4, 6, 11, 59, 0, 272404)),
),
migrations.AlterField(
model_name='usersession',
name='publicUrl',
field=models.CharField(default='', max_length=200),
),
]
| mit |
dset0x/invenio-workflows | invenio_workflows/logger.py | 5 | 3182 | # -*- coding: utf-8 -*-
# This file is part of Invenio.
# Copyright (C) 2013, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Logging part of workflows module."""
import logging
def get_logger(logger_name, db_handler_obj, level=10, **kwargs):
"""
Initialize and return a Python logger object.
You can specify handlers to output logs to sys.stderr as well as to the
database, or anywhere else you want.
"""
logging.basicConfig(level=level)
# Get a basic logger object
logger = logging.getLogger(logger_name)
if not logger.handlers:
# Create formatter and add it to the handlers
formatter = logging.Formatter(
'%(levelname)s %(asctime)s %(name)s %(message)s')
db_handler_obj.setFormatter(formatter)
db_handler_obj.setLevel(level)
logger.addHandler(db_handler_obj)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
stream_handler.setLevel(level)
logger.addHandler(stream_handler)
# Let's not propagate to root logger..
logger.propagate = 0
# FIXME: loglevels are simply overwritten somewhere in Celery
# even if Celery is not being "used".
#
# This means log.DEBUG is NOT working at the moment!
logger.setLevel(level)
# Add any kwargs to extra parameter and return logger
wrapped_logger = BibWorkflowLogAdapter(logger, kwargs)
return wrapped_logger
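# Minimal usage sketch (the model and names below are assumptions for
# illustration only):
#
#   db_handler = BibWorkflowLogHandler(BibWorkflowObjectLog, id_name="id")
#   logger = get_logger("workflow.my_workflow", db_handler, obj=my_object)
#   logger.info("object processed")
#
# Every extra kwarg given to get_logger (here ``obj``) ends up on each log
# record via the adapter's ``extra`` dict, which is how
# BibWorkflowLogHandler.emit() finds ``record.obj``.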
class BibWorkflowLogHandler(logging.Handler, object):
"""Implements a handler for logging to database."""
def __init__(self, model, id_name):
"""Instanciate a BibWorkflowLogHandler object."""
super(BibWorkflowLogHandler, self).__init__()
self.model = model
self.id_name = id_name
def emit(self, record):
""" Create the log object in database."""
from invenio.ext.sqlalchemy import db
log_obj = self.model(id_object=getattr(record.obj, self.id_name),
log_type=record.levelno,
message=record.msg)
db.session.add(log_obj)
db.session.commit()
class BibWorkflowLogAdapter(logging.LoggerAdapter):
"""
BibWorkflowLogAdapter class.
This adapter attaches the dict-like object passed at construction time
(for example, one carrying an 'obj' key) to every log record via the
``extra`` mechanism.
"""
def process(self, msg, kwargs):
""" Save kwargs in extra."""
kwargs['extra'] = self.extra
return msg, kwargs
| gpl-2.0 |
nitely/Spirit | spirit/topic/views.py | 1 | 3867 | # -*- coding: utf-8 -*-
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponsePermanentRedirect
from djconfig import config
from spirit.core.utils.views import is_post, post_data
from spirit.core.utils.paginator import paginate, yt_paginate
from spirit.core.utils.ratelimit.decorators import ratelimit
from spirit.category.models import Category
from spirit.comment.forms import CommentForm
from spirit.comment.utils import comment_posted
from spirit.comment.models import Comment
from .models import Topic
from .forms import TopicForm
from . import utils
@login_required
@ratelimit(rate='1/10s')
def publish(request, category_id=None):
if category_id:
get_object_or_404(
Category.objects.visible(),
pk=category_id)
user = request.user
form = TopicForm(
user=user,
data=post_data(request),
initial={'category': category_id})
cform = CommentForm(
user=user,
data=post_data(request))
if (is_post(request) and
all([form.is_valid(), cform.is_valid()]) and
not request.is_limited()):
if not user.st.update_post_hash(form.get_topic_hash()):
return redirect(
request.POST.get('next', None) or
form.get_category().get_absolute_url())
# wrap in transaction.atomic?
topic = form.save()
cform.topic = topic
comment = cform.save()
comment_posted(comment=comment, mentions=cform.mentions)
return redirect(topic.get_absolute_url())
return render(
request=request,
template_name='spirit/topic/publish.html',
context={'form': form, 'cform': cform})
@login_required
def update(request, pk):
topic = Topic.objects.for_update_or_404(pk, request.user)
category_id = topic.category_id
form = TopicForm(
user=request.user,
data=post_data(request),
instance=topic)
if is_post(request) and form.is_valid():
topic = form.save()
if topic.category_id != category_id:
Comment.create_moderation_action(
user=request.user, topic=topic, action=Comment.MOVED)
return redirect(request.POST.get('next', topic.get_absolute_url()))
return render(
request=request,
template_name='spirit/topic/update.html',
context={'form': form})
def detail(request, pk, slug):
topic = Topic.objects.get_public_or_404(pk, request.user)
if topic.slug != slug:
return HttpResponsePermanentRedirect(topic.get_absolute_url())
utils.topic_viewed(request=request, topic=topic)
comments = (
Comment.objects
.for_topic(topic=topic)
.with_likes(user=request.user)
.with_polls(user=request.user)
.order_by('date'))
comments = paginate(
comments,
per_page=config.comments_per_page,
page_number=request.GET.get('page', 1))
return render(
request=request,
template_name='spirit/topic/detail.html',
context={
'topic': topic,
'comments': comments})
def index_active(request):
categories = (
Category.objects
.visible()
.parents()
.ordered())
topics = (
Topic.objects
.visible()
.global_()
.with_bookmarks(user=request.user)
.order_by('-is_globally_pinned', '-last_active')
.select_related('category'))
topics = yt_paginate(
topics,
per_page=config.topics_per_page,
page_number=request.GET.get('page', 1))
return render(
request=request,
template_name='spirit/topic/active.html',
context={
'categories': categories,
'topics': topics})
| mit |
rhertzog/django | django/core/management/commands/runserver.py | 64 | 6371 | from __future__ import unicode_literals
import errno
import os
import re
import socket
import sys
from datetime import datetime
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.core.servers.basehttp import get_internal_wsgi_application, run
from django.utils import autoreload, six
from django.utils.encoding import force_text, get_system_encoding
naiveip_re = re.compile(r"""^(?:
(?P<addr>
(?P<ipv4>\d{1,3}(?:\.\d{1,3}){3}) | # IPv4 address
(?P<ipv6>\[[a-fA-F0-9:]+\]) | # IPv6 address
(?P<fqdn>[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*) # FQDN
):)?(?P<port>\d+)$""", re.X)
class Command(BaseCommand):
help = "Starts a lightweight Web server for development."
# Validation is called explicitly each time the server is reloaded.
requires_system_checks = False
leave_locale_alone = True
default_port = '8000'
def add_arguments(self, parser):
parser.add_argument(
'addrport', nargs='?',
help='Optional port number, or ipaddr:port'
)
parser.add_argument(
'--ipv6', '-6', action='store_true', dest='use_ipv6', default=False,
help='Tells Django to use an IPv6 address.',
)
parser.add_argument(
'--nothreading', action='store_false', dest='use_threading', default=True,
help='Tells Django to NOT use threading.',
)
parser.add_argument(
'--noreload', action='store_false', dest='use_reloader', default=True,
help='Tells Django to NOT use the auto-reloader.',
)
def execute(self, *args, **options):
if options['no_color']:
# We rely on the environment because it's currently the only
# way to reach WSGIRequestHandler. This seems an acceptable
# compromise considering `runserver` runs indefinitely.
os.environ[str("DJANGO_COLORS")] = str("nocolor")
super(Command, self).execute(*args, **options)
def get_handler(self, *args, **options):
"""
Returns the default WSGI handler for the runner.
"""
return get_internal_wsgi_application()
def handle(self, *args, **options):
from django.conf import settings
if not settings.DEBUG and not settings.ALLOWED_HOSTS:
raise CommandError('You must set settings.ALLOWED_HOSTS if DEBUG is False.')
self.use_ipv6 = options['use_ipv6']
if self.use_ipv6 and not socket.has_ipv6:
raise CommandError('Your Python does not support IPv6.')
self._raw_ipv6 = False
if not options['addrport']:
self.addr = ''
self.port = self.default_port
else:
m = re.match(naiveip_re, options['addrport'])
if m is None:
raise CommandError('"%s" is not a valid port number '
'or address:port pair.' % options['addrport'])
self.addr, _ipv4, _ipv6, _fqdn, self.port = m.groups()
if not self.port.isdigit():
raise CommandError("%r is not a valid port number." % self.port)
if self.addr:
if _ipv6:
self.addr = self.addr[1:-1]
self.use_ipv6 = True
self._raw_ipv6 = True
elif self.use_ipv6 and not _fqdn:
raise CommandError('"%s" is not a valid IPv6 address.' % self.addr)
if not self.addr:
self.addr = '::1' if self.use_ipv6 else '127.0.0.1'
self._raw_ipv6 = self.use_ipv6
self.run(**options)
def run(self, **options):
"""
Runs the server, using the autoreloader if needed
"""
use_reloader = options['use_reloader']
if use_reloader:
autoreload.main(self.inner_run, None, options)
else:
self.inner_run(None, **options)
def inner_run(self, *args, **options):
# If an exception was silenced in ManagementUtility.execute in order
# to be raised in the child process, raise it now.
autoreload.raise_last_exception()
threading = options['use_threading']
# 'shutdown_message' is a stealth option.
shutdown_message = options.get('shutdown_message', '')
quit_command = 'CTRL-BREAK' if sys.platform == 'win32' else 'CONTROL-C'
self.stdout.write("Performing system checks...\n\n")
self.check(display_num_errors=True)
# Need to check migrations here, so can't use the
# requires_migrations_check attribute.
self.check_migrations()
now = datetime.now().strftime('%B %d, %Y - %X')
if six.PY2:
now = now.decode(get_system_encoding())
self.stdout.write(now)
self.stdout.write((
"Django version %(version)s, using settings %(settings)r\n"
"Starting development server at http://%(addr)s:%(port)s/\n"
"Quit the server with %(quit_command)s.\n"
) % {
"version": self.get_version(),
"settings": settings.SETTINGS_MODULE,
"addr": '[%s]' % self.addr if self._raw_ipv6 else self.addr,
"port": self.port,
"quit_command": quit_command,
})
try:
handler = self.get_handler(*args, **options)
run(self.addr, int(self.port), handler,
ipv6=self.use_ipv6, threading=threading)
except socket.error as e:
# Use helpful error messages instead of ugly tracebacks.
ERRORS = {
errno.EACCES: "You don't have permission to access that port.",
errno.EADDRINUSE: "That port is already in use.",
errno.EADDRNOTAVAIL: "That IP address can't be assigned to.",
}
try:
error_text = ERRORS[e.errno]
except KeyError:
error_text = force_text(e)
self.stderr.write("Error: %s" % error_text)
# Need to use an OS exit because sys.exit doesn't work in a thread
os._exit(1)
except KeyboardInterrupt:
if shutdown_message:
self.stdout.write(shutdown_message)
sys.exit(0)
# Kept for backward compatibility
BaseRunserverCommand = Command
| bsd-3-clause |
gdimitris/ChessPuzzlerBackend | Virtual_Environment/lib/python2.7/site-packages/setuptools/command/install_scripts.py | 505 | 2231 | from distutils import log
import distutils.command.install_scripts as orig
import os
from pkg_resources import Distribution, PathMetadata, ensure_directory
class install_scripts(orig.install_scripts):
"""Do normal script install, plus any egg_info wrapper scripts"""
def initialize_options(self):
orig.install_scripts.initialize_options(self)
self.no_ep = False
def run(self):
import setuptools.command.easy_install as ei
self.run_command("egg_info")
if self.distribution.scripts:
orig.install_scripts.run(self) # run first to set up self.outfiles
else:
self.outfiles = []
if self.no_ep:
# don't install entry point scripts into .egg file!
return
ei_cmd = self.get_finalized_command("egg_info")
dist = Distribution(
ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
ei_cmd.egg_name, ei_cmd.egg_version,
)
bs_cmd = self.get_finalized_command('build_scripts')
exec_param = getattr(bs_cmd, 'executable', None)
bw_cmd = self.get_finalized_command("bdist_wininst")
is_wininst = getattr(bw_cmd, '_is_running', False)
writer = ei.ScriptWriter
if is_wininst:
exec_param = "python.exe"
writer = ei.WindowsScriptWriter
# resolve the writer to the environment
writer = writer.best()
cmd = writer.command_spec_class.best().from_param(exec_param)
for args in writer.get_args(dist, cmd.as_header()):
self.write_script(*args)
def write_script(self, script_name, contents, mode="t", *ignored):
"""Write an executable file to the scripts directory"""
from setuptools.command.easy_install import chmod, current_umask
log.info("Installing %s script to %s", script_name, self.install_dir)
target = os.path.join(self.install_dir, script_name)
self.outfiles.append(target)
mask = current_umask()
if not self.dry_run:
ensure_directory(target)
f = open(target, "w" + mode)
f.write(contents)
f.close()
chmod(target, 0o777 - mask)
| mit |
arbrandes/edx-platform | lms/djangoapps/mobile_api/tests/test_mobile_platform.py | 5 | 2631 | """
Tests for Platform against Mobile App Request
"""
import ddt
from django.test import TestCase
from lms.djangoapps.mobile_api.mobile_platform import MobilePlatform
@ddt.ddt
class TestMobilePlatform(TestCase):
"""
Tests for platform against mobile app request
"""
@ddt.data(
("edX/org.edx.mobile (0.1.5; OS Version 9.2 (Build 13C75))", "iOS", "0.1.5"),
("edX/org.edx.mobile (1.01.1; OS Version 9.2 (Build 13C75))", "iOS", "1.01.1"),
("edX/org.edx.mobile (2.2.2; OS Version 9.2 (Build 13C75))", "iOS", "2.2.2"),
("edX/org.edx.mobile (3.3.3; OS Version 9.2 (Build 13C75))", "iOS", "3.3.3"),
("edX/org.edx.mobile (3.3.3.test; OS Version 9.2 (Build 13C75))", "iOS", "3.3.3.test"),
("edX/org.test-domain.mobile (0.1.5; OS Version 9.2 (Build 13C75))", "iOS", "0.1.5"),
("Dalvik/2.1.0 (Linux; U; Android 5.1; Nexus 5 Build/LMY47I) edX/org.edx.mobile/1.1.1", "Android", "1.1.1"),
("Dalvik/2.1.0 (Linux; U; Android 5.1; Nexus 5 Build/LMY47I) edX/org.edx.mobile/3.3.3.X", "Android", "3.3.3.X"),
("Dalvik/2.1.0 (Linux; U; Android 9; MI 6 MIUI/V11.0.3.0.PCAMIXM) edX/org.edx.mobile/2.17.1", "Android", "2.17.1"), # lint-amnesty, pylint: disable=line-too-long
("Dalvik/2.1.0 (Linux; U; Android 9; JKM-AL00a Build/HUAWEIJKM-AL00a) edX/org.edx.mobile/2.8.1", "Android", "2.8.1"), # lint-amnesty, pylint: disable=line-too-long
("Dalvik/2.1.0 (Linux; U; Android 8.1.0; CPH1803 Build/OPM1.171019.026) edX/org.edx.mobile/2.18.1", "Android", "2.18.1"), # lint-amnesty, pylint: disable=line-too-long
)
@ddt.unpack
def test_platform_instance(self, user_agent, platform_name, version):
platform = MobilePlatform.get_instance(user_agent)
assert platform_name == platform.NAME
assert version == platform.version
@ddt.data(
("Mozilla/5.0 (Linux; Android 5.1; Nexus 5 Build/LMY47I; wv) AppleWebKit/537.36 (KHTML, like Gecko) "
"Version/4.0 Chrome/47.0.2526.100 Mobile Safari/537.36 edX/org.edx.mobile/2.0.0"),
("Mozilla/5.0 (iPhone; CPU iPhone OS 9_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) "
"Mobile/13C75 edX/org.edx.mobile/2.2.1"),
("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 "
"Safari/537.36"),
"edX/org.edx.mobile (0.1.5.2.; OS Version 9.2 (Build 13C75))",
"edX/org.edx.mobile (0.1.5.2.5.1; OS Version 9.2 (Build 13C75))",
)
def test_non_mobile_app_requests(self, user_agent):
assert MobilePlatform.get_instance(user_agent) is None
| agpl-3.0 |
shell909090/shadowsocks | shadowsocks/crypto/rc4_md5.py | 15 | 1393 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import hashlib
from shadowsocks.crypto import openssl
__all__ = ['ciphers']
def create_cipher(alg, key, iv, op, crypto_path=None,
key_as_bytes=0, d=None, salt=None,
i=1, padding=1):
md5 = hashlib.md5()
md5.update(key)
md5.update(iv)
rc4_key = md5.digest()
return openssl.OpenSSLStreamCrypto(b'rc4', rc4_key, b'', op, crypto_path)
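# Sketch of the key schedule above (illustrative, not part of the upstream
# module): the RC4 session key is MD5(pre-shared key || IV), so the same user
# key yields a different key stream for every per-connection IV:
#
#   >>> import hashlib
#   >>> k, iv1, iv2 = b'k' * 32, b'i' * 16, b'j' * 16
#   >>> hashlib.md5(k + iv1).digest() == hashlib.md5(k + iv2).digest()
#   False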
ciphers = {
'rc4-md5': (16, 16, create_cipher),
}
def test():
from shadowsocks.crypto import util
cipher = create_cipher('rc4-md5', b'k' * 32, b'i' * 16, 1)
decipher = create_cipher('rc4-md5', b'k' * 32, b'i' * 16, 0)
util.run_cipher(cipher, decipher)
if __name__ == '__main__':
test()
| apache-2.0 |
pp-mo/iris | docs/iris/example_code/Oceanography/atlantic_profiles.py | 2 | 3469 | """
Oceanographic profiles and T-S diagrams
=======================================
This example demonstrates how to plot vertical profiles of different
variables in the same axes, and how to make a scatter plot of two
variables. There is an oceanographic theme but the same techniques are
equally applicable to atmospheric or other kinds of data.
The data used are profiles of potential temperature and salinity in the
Equatorial and South Atlantic, output from an ocean model.
The y-axis of the first plot produced will be automatically inverted due to the
presence of the attribute positive=down on the depth coordinate. This means
depth values intuitively increase downward on the y-axis.
"""
import iris
import iris.iterate
import iris.plot as iplt
import matplotlib.pyplot as plt
def main():
# Load the gridded temperature and salinity data.
fname = iris.sample_data_path("atlantic_profiles.nc")
cubes = iris.load(fname)
(theta,) = cubes.extract("sea_water_potential_temperature")
(salinity,) = cubes.extract("sea_water_practical_salinity")
# Extract profiles of temperature and salinity from a particular point in
# the southern portion of the domain, and limit the depth of the profile
# to 1000m.
lon_cons = iris.Constraint(longitude=330.5)
lat_cons = iris.Constraint(latitude=lambda l: -10 < l < -9)
depth_cons = iris.Constraint(depth=lambda d: d <= 1000)
theta_1000m = theta.extract(depth_cons & lon_cons & lat_cons)
salinity_1000m = salinity.extract(depth_cons & lon_cons & lat_cons)
# Plot these profiles on the same set of axes. In each case we call plot
# with two arguments, the cube followed by the depth coordinate. Putting
# them in this order places the depth coordinate on the y-axis.
# The first plot is in the default axes. We'll use the same color for the
# curve and its axes/tick labels.
plt.figure(figsize=(5, 6))
temperature_color = (0.3, 0.4, 0.5)
ax1 = plt.gca()
iplt.plot(
theta_1000m,
theta_1000m.coord("depth"),
linewidth=2,
color=temperature_color,
alpha=0.75,
)
ax1.set_xlabel("Potential Temperature / K", color=temperature_color)
ax1.set_ylabel("Depth / m")
for ticklabel in ax1.get_xticklabels():
ticklabel.set_color(temperature_color)
# To plot salinity in the same axes we use twiny(). We'll use a different
# color to identify salinity.
salinity_color = (0.6, 0.1, 0.15)
ax2 = plt.gca().twiny()
iplt.plot(
salinity_1000m,
salinity_1000m.coord("depth"),
linewidth=2,
color=salinity_color,
alpha=0.75,
)
ax2.set_xlabel("Salinity / PSU", color=salinity_color)
for ticklabel in ax2.get_xticklabels():
ticklabel.set_color(salinity_color)
plt.tight_layout()
iplt.show()
# Now plot a T-S diagram using scatter. We'll use all the profiles here,
# and each point will be coloured according to its depth.
plt.figure(figsize=(6, 6))
depth_values = theta.coord("depth").points
for s, t in iris.iterate.izip(salinity, theta, coords="depth"):
iplt.scatter(s, t, c=depth_values, marker="+", cmap="RdYlBu_r")
ax = plt.gca()
ax.set_xlabel("Salinity / PSU")
ax.set_ylabel("Potential Temperature / K")
cb = plt.colorbar(orientation="horizontal")
cb.set_label("Depth / m")
plt.tight_layout()
iplt.show()
if __name__ == "__main__":
main()
| lgpl-3.0 |
mne-tools/mne-tools.github.io | 0.16/_downloads/plot_stats_cluster_time_frequency_repeated_measures_anova.py | 7 | 10025 | """
====================================================================
Mass-univariate twoway repeated measures ANOVA on single trial power
====================================================================
This script shows how to conduct a mass-univariate repeated measures
ANOVA. As the model to be fitted assumes two fully crossed factors,
we will study the interplay between perceptual modality
(auditory VS visual) and the location of stimulus presentation
(left VS right). Here we use single trials as replications
(subjects) while iterating over time slices plus frequency bands
to fit our mass-univariate model. For the sake of simplicity we
will confine this analysis to a single channel which we know
exposes a strong induced response. We will then visualize
each effect by creating a corresponding mass-univariate effect
image. We conclude by accounting for multiple comparisons,
performing a permutation clustering test using the ANOVA as
the clustering function. The final results will then be compared
with those obtained using False Discovery Rate correction.
"""
# Authors: Denis Engemann <denis.engemann@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.time_frequency import tfr_morlet
from mne.stats import f_threshold_mway_rm, f_mway_rm, fdr_correction
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
# --------------
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
tmin, tmax = -0.2, 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
include = []
raw.info['bads'] += ['MEG 2443'] # bads
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=False, include=include, exclude='bads')
ch_name = 'MEG 1332'
# Load conditions
reject = dict(grad=4000e-13, eog=150e-6)
event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
picks=picks, baseline=(None, 0), preload=True,
reject=reject)
epochs.pick_channels([ch_name]) # restrict example to one channel
###############################################################################
# We have to make sure all conditions have the same counts, as the ANOVA
# expects a fully balanced data matrix and does not handle imbalances
# gracefully (risk of type-I error).
epochs.equalize_event_counts(event_id)
# Factor to down-sample the temporal dimension of the TFR computed by
# tfr_morlet.
decim = 2
freqs = np.arange(7, 30, 3) # define frequencies of interest
n_cycles = freqs / freqs[0]
zero_mean = False # don't correct morlet wavelet to be of mean zero
# To have a true wavelet zero_mean should be True but here for illustration
# purposes it helps to spot the evoked response.
###############################################################################
# Create TFR representations for all conditions
# ---------------------------------------------
epochs_power = list()
for condition in [epochs[k] for k in event_id]:
this_tfr = tfr_morlet(condition, freqs, n_cycles=n_cycles,
decim=decim, average=False, zero_mean=zero_mean,
return_itc=False)
this_tfr.apply_baseline(mode='ratio', baseline=(None, 0))
this_power = this_tfr.data[:, 0, :, :] # we only have one channel.
epochs_power.append(this_power)
###############################################################################
# Setup repeated measures ANOVA
# -----------------------------
#
# We will tell the ANOVA how to interpret the data matrix in terms of factors.
# This is done via the factor levels argument, which is a list of the
# number of levels for each factor.
n_conditions = len(epochs.event_id)
n_replications = epochs.events.shape[0] // n_conditions
factor_levels = [2, 2] # number of levels in each factor
effects = 'A*B' # this is the default signature for computing all effects
# Other possible options are 'A' or 'B' for the corresponding main effects
# or 'A:B' for the interaction effect only (this notation is borrowed from the
# R formula language)
n_freqs = len(freqs)
times = 1e3 * epochs.times[::decim]
n_times = len(times)
###############################################################################
# Now we'll assemble the data matrix and swap axes so the trial replications
# are the first dimension and the conditions are the second dimension.
data = np.swapaxes(np.asarray(epochs_power), 1, 0)
# reshape last two dimensions in one mass-univariate observation-vector
data = data.reshape(n_replications, n_conditions, n_freqs * n_times)
# so we have replications * conditions * observations:
print(data.shape)
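# For example, with the 4 balanced conditions of 56 trials each shown in the
# sample layout table below, 8 frequencies and 211 decimated time points,
# this would print (56, 4, 1688) -- illustrative numbers; the exact trial
# count depends on the sample data.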
###############################################################################
# While the iteration scheme used above for assembling the data matrix
# makes sure the first two dimensions are organized as expected (with A =
# modality and B = location):
#
# .. table:: Sample data layout
#
# ===== ==== ==== ==== ====
#    trial A1B1 A1B2 A2B1 A2B2
# ===== ==== ==== ==== ====
# 1 1.34 2.53 0.97 1.74
# ... ... ... ... ...
# 56 2.45 7.90 3.09 4.76
# ===== ==== ==== ==== ====
#
# Now we're ready to run our repeated measures ANOVA.
#
# Note: as we treat trials as subjects, the test only accounts for
# time-locked responses despite the 'induced' approach.
# For an analysis of induced power at the group level, averaged TFRs
# are required.
fvals, pvals = f_mway_rm(data, factor_levels, effects=effects)
effect_labels = ['modality', 'location', 'modality by location']
# let's visualize our effects by computing f-images
for effect, sig, effect_label in zip(fvals, pvals, effect_labels):
plt.figure()
# show naive F-values in gray
plt.imshow(effect.reshape(8, 211), cmap=plt.cm.gray, extent=[times[0],
times[-1], freqs[0], freqs[-1]], aspect='auto',
origin='lower')
# create mask for significant Time-frequency locations
effect = np.ma.masked_array(effect, [sig > .05])
plt.imshow(effect.reshape(8, 211), cmap='RdBu_r', extent=[times[0],
times[-1], freqs[0], freqs[-1]], aspect='auto',
origin='lower')
plt.colorbar()
plt.xlabel('Time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title(r"Time-locked response for '%s' (%s)" % (effect_label, ch_name))
plt.show()
###############################################################################
# Account for multiple comparisons using FDR versus permutation clustering test
# -----------------------------------------------------------------------------
#
# First we need to slightly modify the ANOVA function to be suitable for
# the clustering procedure. We also want to set some defaults.
# Let's first override effects to confine the analysis to the interaction
effects = 'A:B'
###############################################################################
# A stat_fun must deal with a variable number of input arguments.
# Inside the clustering function each condition will be passed as a flattened
# array, as required by the clustering procedure. The ANOVA however expects an
# input array of dimensions: subjects X conditions X observations (optional).
# The following function catches the list input and swaps the first and
# the second dimension and finally calls the ANOVA function.
def stat_fun(*args):
return f_mway_rm(np.swapaxes(args, 1, 0), factor_levels=factor_levels,
effects=effects, return_pvals=False)[0]
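# Shape sketch (illustrative): the clustering code calls stat_fun with one
# flattened array of shape (n_replications, n_freqs * n_times) per condition;
# np.swapaxes(args, 1, 0) stacks them back into the
# (n_replications, n_conditions, n_observations) layout f_mway_rm expects.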
# The ANOVA returns a tuple of f-values and p-values; we will pick the former.
pthresh = 0.001 # set threshold rather high to save some time
f_thresh = f_threshold_mway_rm(n_replications, factor_levels, effects,
pthresh)
tail = 1 # f-test, so tail > 0
n_permutations = 256 # Save some time (the test won't be too sensitive ...)
T_obs, clusters, cluster_p_values, h0 = mne.stats.permutation_cluster_test(
epochs_power, stat_fun=stat_fun, threshold=f_thresh, tail=tail, n_jobs=1,
n_permutations=n_permutations, buffer_size=None)
###############################################################################
# Create new stats image with only significant clusters:
good_clusters = np.where(cluster_p_values < .05)[0]
T_obs_plot = np.ma.masked_array(T_obs,
np.invert(clusters[np.squeeze(good_clusters)]))
plt.figure()
for f_image, cmap in zip([T_obs, T_obs_plot], [plt.cm.gray, 'RdBu_r']):
plt.imshow(f_image, cmap=cmap, extent=[times[0], times[-1],
freqs[0], freqs[-1]], aspect='auto',
origin='lower')
plt.xlabel('Time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title("Time-locked response for 'modality by location' (%s)\n"
" cluster-level corrected (p <= 0.05)" % ch_name)
plt.show()
###############################################################################
# Now using FDR:
mask, _ = fdr_correction(pvals[2])
T_obs_plot2 = np.ma.masked_array(T_obs, np.invert(mask))
plt.figure()
for f_image, cmap in zip([T_obs, T_obs_plot2], [plt.cm.gray, 'RdBu_r']):
plt.imshow(f_image, cmap=cmap, extent=[times[0], times[-1],
freqs[0], freqs[-1]], aspect='auto',
origin='lower')
plt.xlabel('Time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title("Time-locked response for 'modality by location' (%s)\n"
" FDR corrected (p <= 0.05)" % ch_name)
plt.show()
###############################################################################
# Both cluster-level and FDR correction help get rid of the
# putatively significant spots we saw in the naive f-images.
| bsd-3-clause |
lizardsystem/lizard-rijnmond | lizard_rijnmond/migrations/0006_auto__chg_field_strategy_name.py | 1 | 2731 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Strategy.name'
db.alter_column('lizard_rijnmond_strategy', 'name', self.gf('django.db.models.fields.CharField')(max_length=128, null=True))
def backwards(self, orm):
# Changing field 'Strategy.name'
db.alter_column('lizard_rijnmond_strategy', 'name', self.gf('django.db.models.fields.CharField')(max_length=20, null=True))
models = {
'lizard_rijnmond.measure': {
'Meta': {'object_name': 'Measure'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'lizard_rijnmond.result': {
'Meta': {'object_name': 'Result'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'measure': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_rijnmond.Measure']", 'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'lizard_rijnmond.scenario': {
'Meta': {'object_name': 'Scenario'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'})
},
'lizard_rijnmond.segment': {
'Meta': {'object_name': 'Segment'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'maintainer': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'the_geom': ('django.contrib.gis.db.models.fields.LineStringField', [], {})
},
'lizard_rijnmond.strategy': {
'Meta': {'object_name': 'Strategy'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['lizard_rijnmond']
| gpl-3.0 |
rec/echomesh | code/python/echomesh/util/string/Split_test.py | 1 | 1508 | from __future__ import absolute_import, division, print_function, unicode_literals
from echomesh.util.string.Split import split_words, pair_split
from echomesh.util.TestCase import TestCase
class SplitTest(TestCase):
def test_split_simple(self):
self.assertEqual(split_words('hello, there!'), ['hello,', 'there!'])
def test_split_empty(self):
self.assertEqual(split_words(''), [])
def test_split_one(self):
self.assertEqual(split_words('one'), ['one'])
def test_split_two(self):
self.assertEqual(split_words('one two'), ['one', 'two'])
def test_split_two_space(self):
self.assertEqual(split_words(' one two'), ['one', 'two'])
def test_split_two_spaces(self):
self.assertEqual(split_words(' one two '), ['one', 'two'])
def test_pair_split_empty(self):
self.assertEqual(pair_split([]), [])
def test_pair_split_one(self):
self.assertEqual(pair_split(split_words('hello')), [('hello', None)])
def test_pair_split_two(self):
self.assertEqual(pair_split(split_words('hello as hell')),
[('hello', 'hell')])
def test_pair_split_three(self):
self.assertEqual(pair_split(split_words('hello as hell dogs as cats')),
[('hello', 'hell'), ('dogs', 'cats')])
def test_pair_split_two_pair(self):
self.assertEqual(pair_split(split_words('hello hell as dogs cats')),
[('hello', 'dogs'), ('hell', 'cats')])
| mit |
Boussadia/weboob | modules/guerrillamail/browser.py | 2 | 2689 | # -*- coding: utf-8 -*-
# Copyright(C) 2013 Vincent A
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.browser import BaseBrowser
from weboob.tools.date import datetime
from weboob.tools.parsers.jsonparser import json
from urllib import urlencode
#from .pages import Page1, Page2
__all__ = ['GuerrillamailBrowser']
class GuerrillamailBrowser(BaseBrowser):
PROTOCOL = 'https'
DOMAIN = 'www.guerrillamail.com'
ENCODING = 'utf-8'
def __init__(self, *args, **kw):
kw['parser'] = 'raw'
BaseBrowser.__init__(self, *args, **kw)
def _get_unicode(self, url, *a):
return self.get_document(self.openurl(url, *a)).decode(self.ENCODING, 'replace')
def _get_json(self, url, *a):
j = json.loads(self._get_unicode(url, *a))
return j
def get_mails(self, boxid):
params = {'email_user': boxid, 'lang': 'en', 'domain': 'guerrillamail.com'}
d = self._get_json('https://www.guerrillamail.com/ajax.php?f=set_email_user', urlencode(params))
d = self._get_json('https://www.guerrillamail.com/ajax.php?f=get_email_list&offset=0&domain=guerrillamail.com')
for m in d['list']:
info = {}
info['id'] = m['mail_id']
info['from'] = m['mail_from']
# info['to'] = m['mail_recipient']
info['to'] = '%s@guerrillamail.com' % boxid
info['subject'] = m['mail_subject']
info['datetime'] = datetime.fromtimestamp(int(m['mail_timestamp']))
info['read'] = bool(int(m['mail_read']))
yield info
def get_mail_content(self, mailid):
d = self._get_json('https://www.guerrillamail.com/ajax.php?f=fetch_email&email_id=mr_%s&domain=guerrillamail.com' % mailid)
return d['mail_body']
def send_mail(self, from_, to, subject, body):
params = {'from': from_, 'to': to, 'subject': subject, 'body': body, 'attach': '', 'domain': 'guerrillamail.com'}
self._get_json('https://www.guerrillamail.com/ajax.php?f=send_email', urlencode(params))
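# Minimal usage sketch (assumes network access to guerrillamail.com; the box
# id below is made up):
#
#   browser = GuerrillamailBrowser()
#   for mail in browser.get_mails('mybox'):
#       print(mail['datetime'], mail['from'], mail['subject'])
#   body = browser.get_mail_content(mail['id'])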
| agpl-3.0 |
mastbaum/rat-pac | python/SCons/Tool/yacc.py | 19 | 4762 | """SCons.Tool.yacc
Tool-specific initialization for yacc.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/yacc.py 4043 2009/02/23 09:06:45 scons"
import os.path
import string
import SCons.Defaults
import SCons.Tool
import SCons.Util
YaccAction = SCons.Action.Action("$YACCCOM", "$YACCCOMSTR")
def _yaccEmitter(target, source, env, ysuf, hsuf):
yaccflags = env.subst("$YACCFLAGS", target=target, source=source)
flags = SCons.Util.CLVar(yaccflags)
targetBase, targetExt = os.path.splitext(SCons.Util.to_String(target[0]))
if '.ym' in ysuf: # If using Objective-C
target = [targetBase + ".m"] # the extension is ".m".
# If -d is specified on the command line, yacc will emit a .h
# or .hpp file with the same name as the .c or .cpp output file.
if '-d' in flags:
target.append(targetBase + env.subst(hsuf, target=target, source=source))
# If -g is specified on the command line, yacc will emit a .vcg
# file with the same base name as the .y, .yacc, .ym or .yy file.
if "-g" in flags:
base, ext = os.path.splitext(SCons.Util.to_String(source[0]))
target.append(base + env.subst("$YACCVCGFILESUFFIX"))
# With --defines and --graph, the name of the file is totally defined
# in the options.
fileGenOptions = ["--defines=", "--graph="]
for option in flags:
for fileGenOption in fileGenOptions:
l = len(fileGenOption)
if option[:l] == fileGenOption:
# A file generating option is present, so add the file
# name to the list of targets.
fileName = string.strip(option[l:])
target.append(fileName)
return (target, source)
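# Illustration (hypothetical values, not part of SCons): with YACCFLAGS='-d',
# _yaccEmitter(['parser.c'], ['parser.y'], env, ['.y'], '$YACCHFILESUFFIX')
# returns (['parser.c', 'parser.h'], ['parser.y']), so the generated header
# is tracked as a target alongside the .c file.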
def yEmitter(target, source, env):
return _yaccEmitter(target, source, env, ['.y', '.yacc'], '$YACCHFILESUFFIX')
def ymEmitter(target, source, env):
return _yaccEmitter(target, source, env, ['.ym'], '$YACCHFILESUFFIX')
def yyEmitter(target, source, env):
return _yaccEmitter(target, source, env, ['.yy'], '$YACCHXXFILESUFFIX')
def generate(env):
"""Add Builders and construction variables for yacc to an Environment."""
c_file, cxx_file = SCons.Tool.createCFileBuilders(env)
# C
c_file.add_action('.y', YaccAction)
c_file.add_emitter('.y', yEmitter)
c_file.add_action('.yacc', YaccAction)
c_file.add_emitter('.yacc', yEmitter)
# Objective-C
c_file.add_action('.ym', YaccAction)
c_file.add_emitter('.ym', ymEmitter)
# C++
cxx_file.add_action('.yy', YaccAction)
cxx_file.add_emitter('.yy', yyEmitter)
env['YACC'] = env.Detect('bison') or 'yacc'
env['YACCFLAGS'] = SCons.Util.CLVar('')
env['YACCCOM'] = '$YACC $YACCFLAGS -o $TARGET $SOURCES'
env['YACCHFILESUFFIX'] = '.h'
# Apparently, OS X now creates file.hpp like everybody else
# I have no idea when it changed; it was fixed in 10.4
#if env['PLATFORM'] == 'darwin':
# # Bison on Mac OS X just appends ".h" to the generated target .cc
# # or .cpp file name. Hooray for delayed expansion of variables.
# env['YACCHXXFILESUFFIX'] = '${TARGET.suffix}.h'
#else:
# env['YACCHXXFILESUFFIX'] = '.hpp'
env['YACCHXXFILESUFFIX'] = '.hpp'
env['YACCVCGFILESUFFIX'] = '.vcg'
def exists(env):
return env.Detect(['bison', 'yacc'])
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| bsd-3-clause |
Webstr-framework/webstr | webstr/patternfly/modal/pages.py | 1 | 1363 | """
Page objects for patternfly/bootstrap modal window.
A modal window is a window which makes itself the only active element on the
page, so that one needs to close it first to access the rest of the page again.
"""
# Copyright 2016 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from webstr.core import WebstrPage
from webstr.patternfly.modal import models as m_modal
class ModalWindow(WebstrPage):
"""
Base page object class for any modal window.
"""
_model = m_modal.ModalWindowModel
_required_elems = ['header', 'body', 'footer', 'title', 'close_btn']
def close(self):
"""
        Close the modal window via the default close button in the header
(a button labeled "X" in the top right corner).
"""
self._model.close_btn.click()
def get_title(self):
return self._model.title.text
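# Usage sketch (`driver` below is a hypothetical Selenium WebDriver instance;
# webstr page objects are constructed from one):
#
#   modal = ModalWindow(driver)
#   print(modal.get_title())
#   modal.close()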
| apache-2.0 |
mpare002/HackTech_2017 | env/Lib/site-packages/pip/_vendor/html5lib/sanitizer.py | 805 | 16428 | from __future__ import absolute_import, division, unicode_literals
import re
from xml.sax.saxutils import escape, unescape
from .tokenizer import HTMLTokenizer
from .constants import tokenTypes
class HTMLSanitizerMixin(object):
""" sanitization of XHTML+MathML+SVG and of inline style attributes."""
acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area',
'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video']
mathml_elements = ['maction', 'math', 'merror', 'mfrac', 'mi',
'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom',
'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub',
'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
'munderover', 'none']
svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
'animateTransform', 'clipPath', 'circle', 'defs', 'desc', 'ellipse',
'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph',
'mpath', 'path', 'polygon', 'polyline', 'radialGradient', 'rect',
'set', 'stop', 'svg', 'switch', 'text', 'title', 'tspan', 'use']
acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
'background', 'balance', 'bgcolor', 'bgproperties', 'border',
'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color',
'cols', 'colspan', 'compact', 'contenteditable', 'controls', 'coords',
'data', 'datafld', 'datapagesize', 'datasrc', 'datetime', 'default',
'delay', 'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end',
'face', 'for', 'form', 'frame', 'galleryimg', 'gutter', 'headers',
'height', 'hidefocus', 'hidden', 'high', 'href', 'hreflang', 'hspace',
'icon', 'id', 'inputmode', 'ismap', 'keytype', 'label', 'leftspacing',
'lang', 'list', 'longdesc', 'loop', 'loopcount', 'loopend',
'loopstart', 'low', 'lowsrc', 'max', 'maxlength', 'media', 'method',
'min', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'open',
'optimum', 'pattern', 'ping', 'point-size', 'poster', 'pqg', 'preload',
'prompt', 'radiogroup', 'readonly', 'rel', 'repeat-max', 'repeat-min',
'replace', 'required', 'rev', 'rightspacing', 'rows', 'rowspan',
'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start',
'step', 'style', 'summary', 'suppress', 'tabindex', 'target',
'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
'width', 'wrap', 'xml:lang']
mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth',
'display', 'displaystyle', 'equalcolumns', 'equalrows', 'fence',
'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace',
'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', 'maxsize',
'minsize', 'other', 'rowalign', 'rowalign', 'rowalign', 'rowlines',
'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show',
'xlink:type', 'xmlns', 'xmlns:xlink']
svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
'arabic-form', 'ascent', 'attributeName', 'attributeType',
'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
'class', 'clip-path', 'color', 'color-rendering', 'content', 'cx',
'cy', 'd', 'dx', 'dy', 'descent', 'display', 'dur', 'end', 'fill',
'fill-opacity', 'fill-rule', 'font-family', 'font-size',
'font-stretch', 'font-style', 'font-variant', 'font-weight', 'from',
'fx', 'fy', 'g1', 'g2', 'glyph-name', 'gradientUnits', 'hanging',
'height', 'horiz-adv-x', 'horiz-origin-x', 'id', 'ideographic', 'k',
'keyPoints', 'keySplines', 'keyTimes', 'lang', 'marker-end',
'marker-mid', 'marker-start', 'markerHeight', 'markerUnits',
'markerWidth', 'mathematical', 'max', 'min', 'name', 'offset',
'opacity', 'orient', 'origin', 'overline-position',
'overline-thickness', 'panose-1', 'path', 'pathLength', 'points',
'preserveAspectRatio', 'r', 'refX', 'refY', 'repeatCount',
'repeatDur', 'requiredExtensions', 'requiredFeatures', 'restart',
'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', 'stop-color',
'stop-opacity', 'strikethrough-position', 'strikethrough-thickness',
'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap',
'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity',
'stroke-width', 'systemLanguage', 'target', 'text-anchor', 'to',
'transform', 'type', 'u1', 'u2', 'underline-position',
'underline-thickness', 'unicode', 'unicode-range', 'units-per-em',
'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x',
'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y',
'y1', 'y2', 'zoomAndPan']
attr_val_is_uri = ['href', 'src', 'cite', 'action', 'longdesc', 'poster',
'xlink:href', 'xml:base']
svg_attr_val_allows_ref = ['clip-path', 'color-profile', 'cursor', 'fill',
'filter', 'marker', 'marker-start', 'marker-mid', 'marker-end',
'mask', 'stroke']
svg_allow_local_href = ['altGlyph', 'animate', 'animateColor',
'animateMotion', 'animateTransform', 'cursor', 'feImage', 'filter',
'linearGradient', 'pattern', 'radialGradient', 'textpath', 'tref',
'set', 'use']
acceptable_css_properties = ['azimuth', 'background-color',
'border-bottom-color', 'border-collapse', 'border-color',
'border-left-color', 'border-right-color', 'border-top-color', 'clear',
'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
'white-space', 'width']
acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue',
'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
'transparent', 'underline', 'white', 'yellow']
acceptable_svg_properties = ['fill', 'fill-opacity', 'fill-rule',
'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
'stroke-opacity']
acceptable_protocols = ['ed2k', 'ftp', 'http', 'https', 'irc',
'mailto', 'news', 'gopher', 'nntp', 'telnet', 'webcal',
'xmpp', 'callto', 'feed', 'urn', 'aim', 'rsync', 'tag',
'ssh', 'sftp', 'rtsp', 'afs']
# subclasses may define their own versions of these constants
allowed_elements = acceptable_elements + mathml_elements + svg_elements
allowed_attributes = acceptable_attributes + mathml_attributes + svg_attributes
allowed_css_properties = acceptable_css_properties
allowed_css_keywords = acceptable_css_keywords
allowed_svg_properties = acceptable_svg_properties
allowed_protocols = acceptable_protocols
    # Sanitize the +html+, escaping all elements not in ALLOWED_ELEMENTS, and
    # stripping out all attributes not in ALLOWED_ATTRIBUTES. Style
    # attributes are parsed, and a restricted set, specified by
    # ALLOWED_CSS_PROPERTIES and ALLOWED_CSS_KEYWORDS, are allowed through.
    # Attributes in ATTR_VAL_IS_URI are scanned, and only URI schemes specified
    # in ALLOWED_PROTOCOLS are allowed.
    #
    #   sanitize_html('<script> do_nasty_stuff() </script>')
    #    => &lt;script> do_nasty_stuff() &lt;/script>
    #   sanitize_html('<a href="javascript: sucker();">Click here for $100</a>')
    #    => <a>Click here for $100</a>
def sanitize_token(self, token):
# accommodate filters which use token_type differently
token_type = token["type"]
if token_type in list(tokenTypes.keys()):
token_type = tokenTypes[token_type]
if token_type in (tokenTypes["StartTag"], tokenTypes["EndTag"],
tokenTypes["EmptyTag"]):
if token["name"] in self.allowed_elements:
return self.allowed_token(token, token_type)
else:
return self.disallowed_token(token, token_type)
elif token_type == tokenTypes["Comment"]:
pass
else:
return token
def allowed_token(self, token, token_type):
if "data" in token:
attrs = dict([(name, val) for name, val in
token["data"][::-1]
if name in self.allowed_attributes])
for attr in self.attr_val_is_uri:
if attr not in attrs:
continue
val_unescaped = re.sub("[`\000-\040\177-\240\s]+", '',
unescape(attrs[attr])).lower()
# remove replacement characters from unescaped characters
val_unescaped = val_unescaped.replace("\ufffd", "")
if (re.match("^[a-z0-9][-+.a-z0-9]*:", val_unescaped) and
(val_unescaped.split(':')[0] not in
self.allowed_protocols)):
del attrs[attr]
for attr in self.svg_attr_val_allows_ref:
if attr in attrs:
attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)',
' ',
unescape(attrs[attr]))
if (token["name"] in self.svg_allow_local_href and
'xlink:href' in attrs and re.search('^\s*[^#\s].*',
attrs['xlink:href'])):
del attrs['xlink:href']
if 'style' in attrs:
attrs['style'] = self.sanitize_css(attrs['style'])
token["data"] = [[name, val] for name, val in list(attrs.items())]
return token
def disallowed_token(self, token, token_type):
if token_type == tokenTypes["EndTag"]:
token["data"] = "</%s>" % token["name"]
elif token["data"]:
attrs = ''.join([' %s="%s"' % (k, escape(v)) for k, v in token["data"]])
token["data"] = "<%s%s>" % (token["name"], attrs)
else:
token["data"] = "<%s>" % token["name"]
if token.get("selfClosing"):
token["data"] = token["data"][:-1] + "/>"
if token["type"] in list(tokenTypes.keys()):
token["type"] = "Characters"
else:
token["type"] = tokenTypes["Characters"]
del token["name"]
return token
def sanitize_css(self, style):
# disallow urls
style = re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style)
# gauntlet
if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
return ''
if not re.match("^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style):
return ''
clean = []
for prop, value in re.findall("([-\w]+)\s*:\s*([^:;]*)", style):
if not value:
continue
if prop.lower() in self.allowed_css_properties:
clean.append(prop + ': ' + value + ';')
elif prop.split('-')[0].lower() in ['background', 'border', 'margin',
'padding']:
for keyword in value.split():
if not keyword in self.acceptable_css_keywords and \
not re.match("^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword):
break
else:
clean.append(prop + ': ' + value + ';')
elif prop.lower() in self.allowed_svg_properties:
clean.append(prop + ': ' + value + ';')
return ' '.join(clean)
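    # Sketch of the resulting behaviour (illustrative values):
    #
    #   HTMLSanitizerMixin().sanitize_css('color: red; position: fixed')
    #   returns 'color: red;' -- 'position' is not an allowed CSS property.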
class HTMLSanitizer(HTMLTokenizer, HTMLSanitizerMixin):
def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
lowercaseElementName=False, lowercaseAttrName=False, parser=None):
# Change case matching defaults as we only output lowercase html anyway
# This solution doesn't seem ideal...
HTMLTokenizer.__init__(self, stream, encoding, parseMeta, useChardet,
lowercaseElementName, lowercaseAttrName, parser=parser)
def __iter__(self):
for token in HTMLTokenizer.__iter__(self):
token = self.sanitize_token(token)
if token:
yield token
| mit |
dims/ironic | ironic/tests/unit/drivers/test_pxe.py | 3 | 14871 | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test class for PXE Drivers
"""
import mock
import testtools
from ironic.common import exception
from ironic.drivers.modules import agent
from ironic.drivers.modules.amt import management as amt_management
from ironic.drivers.modules.amt import power as amt_power
from ironic.drivers.modules.amt import vendor as amt_vendor
from ironic.drivers.modules.cimc import management as cimc_management
from ironic.drivers.modules.cimc import power as cimc_power
from ironic.drivers.modules import iboot
from ironic.drivers.modules.ilo import console as ilo_console
from ironic.drivers.modules.ilo import inspect as ilo_inspect
from ironic.drivers.modules.ilo import management as ilo_management
from ironic.drivers.modules.ilo import power as ilo_power
from ironic.drivers.modules import ipminative
from ironic.drivers.modules import ipmitool
from ironic.drivers.modules.irmc import management as irmc_management
from ironic.drivers.modules.irmc import power as irmc_power
from ironic.drivers.modules import iscsi_deploy
from ironic.drivers.modules.msftocs import management as msftocs_management
from ironic.drivers.modules.msftocs import power as msftocs_power
from ironic.drivers.modules import pxe as pxe_module
from ironic.drivers.modules import seamicro
from ironic.drivers.modules import snmp
from ironic.drivers.modules import ssh
from ironic.drivers.modules.ucs import management as ucs_management
from ironic.drivers.modules.ucs import power as ucs_power
from ironic.drivers.modules import virtualbox
from ironic.drivers.modules import wol
from ironic.drivers import pxe
from ironic.drivers import utils
class PXEDriversTestCase(testtools.TestCase):
def test_pxe_ipmitool_driver(self):
driver = pxe.PXEAndIPMIToolDriver()
self.assertIsInstance(driver.power, ipmitool.IPMIPower)
self.assertIsInstance(driver.console, ipmitool.IPMIShellinaboxConsole)
self.assertIsInstance(driver.boot, pxe_module.PXEBoot)
self.assertIsInstance(driver.deploy, iscsi_deploy.ISCSIDeploy)
self.assertIsInstance(driver.management, ipmitool.IPMIManagement)
self.assertIsNone(driver.inspect)
# TODO(rameshg87): Need better way of asserting the routes.
self.assertIsInstance(driver.vendor, utils.MixinVendorInterface)
self.assertIsInstance(driver.raid, agent.AgentRAID)
def test_pxe_ssh_driver(self):
driver = pxe.PXEAndSSHDriver()
self.assertIsInstance(driver.power, ssh.SSHPower)
self.assertIsInstance(driver.boot, pxe_module.PXEBoot)
self.assertIsInstance(driver.deploy, iscsi_deploy.ISCSIDeploy)
self.assertIsInstance(driver.management, ssh.SSHManagement)
self.assertIsInstance(driver.vendor, iscsi_deploy.VendorPassthru)
self.assertIsNone(driver.inspect)
self.assertIsInstance(driver.raid, agent.AgentRAID)
@mock.patch.object(pxe.importutils, 'try_import', spec_set=True,
autospec=True)
def test_pxe_ipminative_driver(self, try_import_mock):
try_import_mock.return_value = True
driver = pxe.PXEAndIPMINativeDriver()
self.assertIsInstance(driver.power, ipminative.NativeIPMIPower)
self.assertIsInstance(driver.console,
ipminative.NativeIPMIShellinaboxConsole)
self.assertIsInstance(driver.boot, pxe_module.PXEBoot)
self.assertIsInstance(driver.deploy, iscsi_deploy.ISCSIDeploy)
self.assertIsInstance(driver.management,
ipminative.NativeIPMIManagement)
# TODO(rameshg87): Need better way of asserting the routes.
self.assertIsInstance(driver.vendor, utils.MixinVendorInterface)
self.assertIsNone(driver.inspect)
self.assertIsInstance(driver.raid, agent.AgentRAID)
@mock.patch.object(pxe.importutils, 'try_import', spec_set=True,
autospec=True)
def test_pxe_ipminative_driver_import_error(self, try_import_mock):
try_import_mock.return_value = False
self.assertRaises(exception.DriverLoadError,
pxe.PXEAndIPMINativeDriver)
@mock.patch.object(pxe.importutils, 'try_import', spec_set=True,
autospec=True)
def test_pxe_seamicro_driver(self, try_import_mock):
try_import_mock.return_value = True
driver = pxe.PXEAndSeaMicroDriver()
self.assertIsInstance(driver.power, seamicro.Power)
self.assertIsInstance(driver.boot, pxe_module.PXEBoot)
self.assertIsInstance(driver.deploy, iscsi_deploy.ISCSIDeploy)
self.assertIsInstance(driver.management, seamicro.Management)
self.assertIsInstance(driver.seamicro_vendor, seamicro.VendorPassthru)
self.assertIsInstance(driver.pxe_vendor, iscsi_deploy.VendorPassthru)
self.assertIsInstance(driver.vendor, utils.MixinVendorInterface)
self.assertIsInstance(driver.console, seamicro.ShellinaboxConsole)
@mock.patch.object(pxe.importutils, 'try_import', spec_set=True,
autospec=True)
def test_pxe_seamicro_driver_import_error(self, try_import_mock):
try_import_mock.return_value = False
self.assertRaises(exception.DriverLoadError,
pxe.PXEAndSeaMicroDriver)
@mock.patch.object(pxe.importutils, 'try_import', spec_set=True,
autospec=True)
def test_pxe_iboot_driver(self, try_import_mock):
try_import_mock.return_value = True
driver = pxe.PXEAndIBootDriver()
self.assertIsInstance(driver.power, iboot.IBootPower)
self.assertIsInstance(driver.boot, pxe_module.PXEBoot)
self.assertIsInstance(driver.deploy, iscsi_deploy.ISCSIDeploy)
self.assertIsInstance(driver.vendor, iscsi_deploy.VendorPassthru)
@mock.patch.object(pxe.importutils, 'try_import', spec_set=True,
autospec=True)
def test_pxe_iboot_driver_import_error(self, try_import_mock):
try_import_mock.return_value = False
self.assertRaises(exception.DriverLoadError,
pxe.PXEAndIBootDriver)
@mock.patch.object(pxe.importutils, 'try_import', spec_set=True,
autospec=True)
def test_pxe_ilo_driver(self, try_import_mock):
try_import_mock.return_value = True
driver = pxe.PXEAndIloDriver()
self.assertIsInstance(driver.power, ilo_power.IloPower)
self.assertIsInstance(driver.boot, pxe_module.PXEBoot)
self.assertIsInstance(driver.deploy, iscsi_deploy.ISCSIDeploy)
self.assertIsInstance(driver.vendor, iscsi_deploy.VendorPassthru)
self.assertIsInstance(driver.console,
ilo_console.IloConsoleInterface)
self.assertIsInstance(driver.management,
ilo_management.IloManagement)
self.assertIsInstance(driver.inspect, ilo_inspect.IloInspect)
self.assertIsInstance(driver.raid, agent.AgentRAID)
@mock.patch.object(pxe.importutils, 'try_import', spec_set=True,
autospec=True)
def test_pxe_ilo_driver_import_error(self, try_import_mock):
try_import_mock.return_value = False
self.assertRaises(exception.DriverLoadError,
pxe.PXEAndIloDriver)
@mock.patch.object(pxe.importutils, 'try_import', spec_set=True,
autospec=True)
def test_pxe_snmp_driver(self, try_import_mock):
try_import_mock.return_value = True
driver = pxe.PXEAndSNMPDriver()
self.assertIsInstance(driver.power, snmp.SNMPPower)
self.assertIsInstance(driver.boot, pxe_module.PXEBoot)
self.assertIsInstance(driver.deploy, iscsi_deploy.ISCSIDeploy)
self.assertIsInstance(driver.vendor, iscsi_deploy.VendorPassthru)
self.assertIsNone(driver.management)
@mock.patch.object(pxe.importutils, 'try_import', spec_set=True,
autospec=True)
def test_pxe_snmp_driver_import_error(self, try_import_mock):
try_import_mock.return_value = False
self.assertRaises(exception.DriverLoadError,
pxe.PXEAndSNMPDriver)
@mock.patch.object(pxe.importutils, 'try_import', spec_set=True,
autospec=True)
def test_pxe_irmc_driver(self, try_import_mock):
try_import_mock.return_value = True
driver = pxe.PXEAndIRMCDriver()
self.assertIsInstance(driver.power, irmc_power.IRMCPower)
self.assertIsInstance(driver.console, ipmitool.IPMIShellinaboxConsole)
self.assertIsInstance(driver.boot, pxe_module.PXEBoot)
self.assertIsInstance(driver.deploy, iscsi_deploy.ISCSIDeploy)
self.assertIsInstance(driver.management,
irmc_management.IRMCManagement)
self.assertIsInstance(driver.vendor, iscsi_deploy.VendorPassthru)
@mock.patch.object(pxe.importutils, 'try_import', spec_set=True,
autospec=True)
def test_pxe_irmc_driver_import_error(self, try_import_mock):
try_import_mock.return_value = False
self.assertRaises(exception.DriverLoadError,
pxe.PXEAndIRMCDriver)
@mock.patch.object(pxe.importutils, 'try_import', spec_set=True,
autospec=True)
def test_pxe_vbox_driver(self, try_import_mock):
try_import_mock.return_value = True
driver = pxe.PXEAndVirtualBoxDriver()
self.assertIsInstance(driver.power, virtualbox.VirtualBoxPower)
self.assertIsInstance(driver.boot, pxe_module.PXEBoot)
self.assertIsInstance(driver.deploy, iscsi_deploy.ISCSIDeploy)
self.assertIsInstance(driver.management,
virtualbox.VirtualBoxManagement)
self.assertIsInstance(driver.vendor, iscsi_deploy.VendorPassthru)
self.assertIsInstance(driver.raid, agent.AgentRAID)
@mock.patch.object(pxe.importutils, 'try_import', spec_set=True,
autospec=True)
def test_pxe_vbox_driver_import_error(self, try_import_mock):
try_import_mock.return_value = False
self.assertRaises(exception.DriverLoadError,
pxe.PXEAndVirtualBoxDriver)
@mock.patch.object(pxe.importutils, 'try_import', spec_set=True,
autospec=True)
def test_pxe_amt_driver(self, try_import_mock):
try_import_mock.return_value = True
driver = pxe.PXEAndAMTDriver()
self.assertIsInstance(driver.power, amt_power.AMTPower)
self.assertIsInstance(driver.boot, pxe_module.PXEBoot)
self.assertIsInstance(driver.deploy, iscsi_deploy.ISCSIDeploy)
self.assertIsInstance(driver.management,
amt_management.AMTManagement)
self.assertIsInstance(driver.vendor, amt_vendor.AMTPXEVendorPassthru)
@mock.patch.object(pxe.importutils, 'try_import', spec_set=True,
autospec=True)
def test_pxe_amt_driver_import_error(self, try_import_mock):
try_import_mock.return_value = False
self.assertRaises(exception.DriverLoadError,
pxe.PXEAndAMTDriver)
@mock.patch.object(pxe.importutils, 'try_import', spec_set=True,
autospec=True)
def test_pxe_msftocs_driver(self, try_import_mock):
try_import_mock.return_value = True
driver = pxe.PXEAndMSFTOCSDriver()
self.assertIsInstance(driver.power, msftocs_power.MSFTOCSPower)
self.assertIsInstance(driver.boot, pxe_module.PXEBoot)
self.assertIsInstance(driver.deploy, iscsi_deploy.ISCSIDeploy)
self.assertIsInstance(driver.management,
msftocs_management.MSFTOCSManagement)
self.assertIsInstance(driver.vendor, iscsi_deploy.VendorPassthru)
@mock.patch.object(pxe.importutils, 'try_import', spec_set=True,
autospec=True)
def test_pxe_ucs_driver(self, try_import_mock):
try_import_mock.return_value = True
driver = pxe.PXEAndUcsDriver()
self.assertIsInstance(driver.power, ucs_power.Power)
self.assertIsInstance(driver.boot, pxe_module.PXEBoot)
self.assertIsInstance(driver.deploy, iscsi_deploy.ISCSIDeploy)
self.assertIsInstance(driver.management,
ucs_management.UcsManagement)
self.assertIsInstance(driver.vendor, iscsi_deploy.VendorPassthru)
@mock.patch.object(pxe.importutils, 'try_import', spec_set=True,
autospec=True)
def test_pxe_ucs_driver_import_error(self, try_import_mock):
try_import_mock.return_value = False
self.assertRaises(exception.DriverLoadError,
pxe.PXEAndUcsDriver)
@mock.patch.object(pxe.importutils, 'try_import', spec_set=True,
autospec=True)
def test_pxe_cimc_driver(self, try_import_mock):
try_import_mock.return_value = True
driver = pxe.PXEAndCIMCDriver()
self.assertIsInstance(driver.power, cimc_power.Power)
self.assertIsInstance(driver.boot, pxe_module.PXEBoot)
self.assertIsInstance(driver.deploy, iscsi_deploy.ISCSIDeploy)
self.assertIsInstance(driver.management,
cimc_management.CIMCManagement)
self.assertIsInstance(driver.vendor, iscsi_deploy.VendorPassthru)
@mock.patch.object(pxe.importutils, 'try_import', spec_set=True,
autospec=True)
def test_pxe_cimc_driver_import_error(self, try_import_mock):
try_import_mock.return_value = False
self.assertRaises(exception.DriverLoadError,
pxe.PXEAndCIMCDriver)
@mock.patch.object(pxe.importutils, 'try_import', spec_set=True,
autospec=True)
def test_pxe_wakeonlan_driver(self, try_import_mock):
try_import_mock.return_value = True
driver = pxe.PXEAndWakeOnLanDriver()
self.assertIsInstance(driver.power, wol.WakeOnLanPower)
self.assertIsInstance(driver.boot, pxe_module.PXEBoot)
self.assertIsInstance(driver.deploy, iscsi_deploy.ISCSIDeploy)
self.assertIsInstance(driver.vendor, iscsi_deploy.VendorPassthru)
| apache-2.0 |
anastasia-tarasova/indy-sdk | docs/how-tos/negotiate-proof/python/step4.py | 2 | 1123 | print_log('\n10. Prover creates Proof for Proof Request\n')
cred_for_attr_1 = creds_for_proof_request['attrs']['attr1_referent']
referent = cred_for_attr_1[0]['referent']
print_log('Referent: ')
pprint.pprint(referent)
chosen_claims_json = json.dumps({
'self_attested_attributes': {},
'requested_attrs': {
'attr1_referent': [referent, True]
},
'requested_predicates': {
'predicate1_referent': referent
}
})
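# The [referent, True] pair above binds the chosen credential referent to a
# "revealed" flag; a revealed attribute is what later appears under
# requested_proof['revealed_attrs'] in the assertion at the end of this step,
# while the predicate entry maps its referent directly without a flag.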
pprint.pprint(json.loads(chosen_claims_json))
schemas_json = json.dumps({referent: schema})
cdefs_json = json.dumps({referent: json.loads(cred_def_json)})
revoc_regs_json = json.dumps({})
proof_json = await anoncreds.prover_create_proof(prover_wallet_handle, proof_req_json,
                                                 chosen_claims_json, schemas_json,
                                                 'link_secret', cdefs_json, revoc_regs_json)
proof = json.loads(proof_json)
assert 'Alex' == proof['requested_proof']['revealed_attrs']['attr1_referent'][1]
| apache-2.0 |
pullao/Farspeaker | app/main/routes.py | 1 | 1698 | import flask
from . import main
from . import forms
import os
from werkzeug.utils import secure_filename  # lives under werkzeug.utils in current releases
from flask import Flask, render_template, request
from . import activeCampaign
@main.route('/', methods=['GET', 'POST'])
def index():
""""Login form to enter a room."""
form = forms.LoginForm()
if form.validate_on_submit():
flask.session['name'] = form.name.data
        if form.name.data not in activeCampaign.data['participants']:
activeCampaign.data['participants'][form.name.data]=[]
flask.session['room'] = 'main'
return flask.redirect(flask.url_for('.chat'))
elif flask.request.method == 'GET':
form.name.data = flask.session.get('name', '')
#form.room.data = session.get('room', '')
return flask.render_template('index.html', form=form)
@main.route('/chat')
def chat():
"""Chat room. The user's name and room must be stored in
the session."""
name = flask.session.get('name', '')
room = flask.session.get('room', '')
if name == '' or room == '':
return flask.redirect(flask.url_for('.index'))
return flask.render_template('chat.html', name=name, campaign=activeCampaign)#, room=room)
app = Flask(__name__)
# Sets the default upload folder and handles the POST request at the /uploader URL
UPLOAD_FOLDER = 'uploads/'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
@main.route('/uploader', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
f = request.files['file']
if f:
f.save(os.path.join(app.config['UPLOAD_FOLDER'], secure_filename(f.filename)))
return flask.render_template('chat.html', campaign=activeCampaign) | mit |
ifding/ifding.github.io | stylegan2-ada/dnnlib/tflib/tfutil.py | 1 | 10240 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Miscellaneous helper utils for Tensorflow."""
import os
import numpy as np
import tensorflow as tf
# Silence deprecation warnings from TensorFlow 1.13 onwards
import logging
logging.getLogger('tensorflow').setLevel(logging.ERROR)
import tensorflow.contrib # requires TensorFlow 1.x!
tf.contrib = tensorflow.contrib
from typing import Any, Iterable, List, Union
TfExpression = Union[tf.Tensor, tf.Variable, tf.Operation]
"""A type that represents a valid Tensorflow expression."""
TfExpressionEx = Union[TfExpression, int, float, np.ndarray]
"""A type that can be converted to a valid Tensorflow expression."""
def run(*args, **kwargs) -> Any:
"""Run the specified ops in the default session."""
assert_tf_initialized()
return tf.get_default_session().run(*args, **kwargs)
def is_tf_expression(x: Any) -> bool:
"""Check whether the input is a valid Tensorflow expression, i.e., Tensorflow Tensor, Variable, or Operation."""
return isinstance(x, (tf.Tensor, tf.Variable, tf.Operation))
def shape_to_list(shape: Iterable[tf.Dimension]) -> List[Union[int, None]]:
"""Convert a Tensorflow shape to a list of ints. Retained for backwards compatibility -- use TensorShape.as_list() in new code."""
return [dim.value for dim in shape]
def flatten(x: TfExpressionEx) -> TfExpression:
"""Shortcut function for flattening a tensor."""
with tf.name_scope("Flatten"):
return tf.reshape(x, [-1])
def log2(x: TfExpressionEx) -> TfExpression:
"""Logarithm in base 2."""
with tf.name_scope("Log2"):
return tf.log(x) * np.float32(1.0 / np.log(2.0))
def exp2(x: TfExpressionEx) -> TfExpression:
"""Exponent in base 2."""
with tf.name_scope("Exp2"):
return tf.exp(x * np.float32(np.log(2.0)))
def erfinv(y: TfExpressionEx) -> TfExpression:
"""Inverse of the error function."""
# pylint: disable=no-name-in-module
from tensorflow.python.ops.distributions import special_math
return special_math.erfinv(y)
def lerp(a: TfExpressionEx, b: TfExpressionEx, t: TfExpressionEx) -> TfExpressionEx:
"""Linear interpolation."""
with tf.name_scope("Lerp"):
return a + (b - a) * t
def lerp_clip(a: TfExpressionEx, b: TfExpressionEx, t: TfExpressionEx) -> TfExpression:
"""Linear interpolation with clip."""
with tf.name_scope("LerpClip"):
return a + (b - a) * tf.clip_by_value(t, 0.0, 1.0)
def absolute_name_scope(scope: str) -> tf.name_scope:
"""Forcefully enter the specified name scope, ignoring any surrounding scopes."""
return tf.name_scope(scope + "/")
def absolute_variable_scope(scope: str, **kwargs) -> tf.variable_scope:
"""Forcefully enter the specified variable scope, ignoring any surrounding scopes."""
return tf.variable_scope(tf.VariableScope(name=scope, **kwargs), auxiliary_name_scope=False)
def _sanitize_tf_config(config_dict: dict = None) -> dict:
# Defaults.
cfg = dict()
cfg["rnd.np_random_seed"] = None # Random seed for NumPy. None = keep as is.
cfg["rnd.tf_random_seed"] = "auto" # Random seed for TensorFlow. 'auto' = derive from NumPy random state. None = keep as is.
cfg["env.TF_CPP_MIN_LOG_LEVEL"] = "1" # 0 = Print all available debug info from TensorFlow. 1 = Print warnings and errors, but disable debug info.
cfg["env.HDF5_USE_FILE_LOCKING"] = "FALSE" # Disable HDF5 file locking to avoid concurrency issues with network shares.
cfg["graph_options.place_pruned_graph"] = True # False = Check that all ops are available on the designated device. True = Skip the check for ops that are not used.
cfg["gpu_options.allow_growth"] = True # False = Allocate all GPU memory at the beginning. True = Allocate only as much GPU memory as needed.
# Remove defaults for environment variables that are already set.
for key in list(cfg):
fields = key.split(".")
if fields[0] == "env":
assert len(fields) == 2
if fields[1] in os.environ:
del cfg[key]
# User overrides.
if config_dict is not None:
cfg.update(config_dict)
return cfg
def init_tf(config_dict: dict = None) -> None:
"""Initialize TensorFlow session using good default settings."""
# Skip if already initialized.
if tf.get_default_session() is not None:
return
# Setup config dict and random seeds.
cfg = _sanitize_tf_config(config_dict)
np_random_seed = cfg["rnd.np_random_seed"]
if np_random_seed is not None:
np.random.seed(np_random_seed)
tf_random_seed = cfg["rnd.tf_random_seed"]
if tf_random_seed == "auto":
tf_random_seed = np.random.randint(1 << 31)
if tf_random_seed is not None:
tf.set_random_seed(tf_random_seed)
# Setup environment variables.
for key, value in cfg.items():
fields = key.split(".")
if fields[0] == "env":
assert len(fields) == 2
os.environ[fields[1]] = str(value)
# Create default TensorFlow session.
create_session(cfg, force_as_default=True)
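# Illustrative usage sketch (the override values below are arbitrary, chosen
# only to demonstrate the dotted-key config scheme handled by
# _sanitize_tf_config):
#
#   init_tf({
#       "rnd.np_random_seed": 1000,         # make NumPy deterministic
#       "gpu_options.allow_growth": False,  # allocate all GPU memory up front
#   })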
def assert_tf_initialized():
"""Check that TensorFlow session has been initialized."""
if tf.get_default_session() is None:
raise RuntimeError("No default TensorFlow session found. Please call dnnlib.tflib.init_tf().")
def create_session(config_dict: dict = None, force_as_default: bool = False) -> tf.Session:
"""Create tf.Session based on config dict."""
# Setup TensorFlow config proto.
cfg = _sanitize_tf_config(config_dict)
config_proto = tf.ConfigProto()
for key, value in cfg.items():
fields = key.split(".")
if fields[0] not in ["rnd", "env"]:
obj = config_proto
for field in fields[:-1]:
obj = getattr(obj, field)
setattr(obj, fields[-1], value)
# Create session.
session = tf.Session(config=config_proto)
if force_as_default:
# pylint: disable=protected-access
session._default_session = session.as_default()
session._default_session.enforce_nesting = False
session._default_session.__enter__()
return session
def init_uninitialized_vars(target_vars: List[tf.Variable] = None) -> None:
"""Initialize all tf.Variables that have not already been initialized.
Equivalent to the following, but more efficient and does not bloat the tf graph:
tf.variables_initializer(tf.report_uninitialized_variables()).run()
"""
assert_tf_initialized()
if target_vars is None:
target_vars = tf.global_variables()
test_vars = []
test_ops = []
with tf.control_dependencies(None): # ignore surrounding control_dependencies
for var in target_vars:
assert is_tf_expression(var)
try:
tf.get_default_graph().get_tensor_by_name(var.name.replace(":0", "/IsVariableInitialized:0"))
except KeyError:
# Op does not exist => variable may be uninitialized.
test_vars.append(var)
with absolute_name_scope(var.name.split(":")[0]):
test_ops.append(tf.is_variable_initialized(var))
init_vars = [var for var, inited in zip(test_vars, run(test_ops)) if not inited]
run([var.initializer for var in init_vars])
def set_vars(var_to_value_dict: dict) -> None:
"""Set the values of given tf.Variables.
Equivalent to the following, but more efficient and does not bloat the tf graph:
tflib.run([tf.assign(var, value) for var, value in var_to_value_dict.items()]
"""
assert_tf_initialized()
ops = []
feed_dict = {}
for var, value in var_to_value_dict.items():
assert is_tf_expression(var)
try:
setter = tf.get_default_graph().get_tensor_by_name(var.name.replace(":0", "/setter:0")) # look for existing op
except KeyError:
with absolute_name_scope(var.name.split(":")[0]):
with tf.control_dependencies(None): # ignore surrounding control_dependencies
setter = tf.assign(var, tf.placeholder(var.dtype, var.shape, "new_value"), name="setter") # create new setter
ops.append(setter)
feed_dict[setter.op.inputs[1]] = value
run(ops, feed_dict)
def create_var_with_large_initial_value(initial_value: np.ndarray, *args, **kwargs):
"""Create tf.Variable with large initial value without bloating the tf graph."""
assert_tf_initialized()
assert isinstance(initial_value, np.ndarray)
zeros = tf.zeros(initial_value.shape, initial_value.dtype)
var = tf.Variable(zeros, *args, **kwargs)
set_vars({var: initial_value})
return var
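# Illustrative usage sketch (the array shape and variable name are made up):
# loading pretrained NumPy weights into the graph without embedding them as
# a giant constant node.
#
#   weights = np.random.randn(1024, 1024).astype(np.float32)
#   w = create_var_with_large_initial_value(weights, name="w")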
def convert_images_from_uint8(images, drange=[-1,1], nhwc_to_nchw=False):
"""Convert a minibatch of images from uint8 to float32 with configurable dynamic range.
Can be used as an input transformation for Network.run().
"""
images = tf.cast(images, tf.float32)
if nhwc_to_nchw:
images = tf.transpose(images, [0, 3, 1, 2])
return images * ((drange[1] - drange[0]) / 255) + drange[0]
def convert_images_to_uint8(images, drange=[-1,1], nchw_to_nhwc=False, shrink=1):
"""Convert a minibatch of images from float32 to uint8 with configurable dynamic range.
Can be used as an output transformation for Network.run().
"""
images = tf.cast(images, tf.float32)
if shrink > 1:
ksize = [1, 1, shrink, shrink]
images = tf.nn.avg_pool(images, ksize=ksize, strides=ksize, padding="VALID", data_format="NCHW")
if nchw_to_nhwc:
images = tf.transpose(images, [0, 2, 3, 1])
scale = 255 / (drange[1] - drange[0])
images = images * scale + (0.5 - drange[0] * scale)
return tf.saturate_cast(images, tf.uint8)
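# Illustrative usage sketch, assuming `Gs` is a loaded generator Network and
# `latents` is a NumPy batch of latent vectors; passing this function as an
# output transform converts the generator's NCHW float output into NHWC
# uint8 images in one run() call:
#
#   images = Gs.run(latents, None,
#                   output_transform=dict(func=convert_images_to_uint8,
#                                         nchw_to_nhwc=True))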
| mit |
havellay/ADBMS_ABY | libevent/event_rpcgen.py | 159 | 45222 | #!/usr/bin/env python
#
# Copyright (c) 2005 Niels Provos <provos@citi.umich.edu>
# All rights reserved.
#
# Generates marshaling code based on libevent.
import sys
import re
#
_NAME = "event_rpcgen.py"
_VERSION = "0.1"
_STRUCT_RE = '[a-z][a-z_0-9]*'
# Globals
line_count = 0
white = re.compile(r'^\s+')
cppcomment = re.compile(r'\/\/.*$')
headerdirect = []
cppdirect = []
# Holds everything that makes a struct
class Struct:
def __init__(self, name):
self._name = name
self._entries = []
self._tags = {}
print >>sys.stderr, ' Created struct: %s' % name
def AddEntry(self, entry):
if self._tags.has_key(entry.Tag()):
print >>sys.stderr, ( 'Entry "%s" duplicates tag number '
'%d from "%s" around line %d' ) % (
entry.Name(), entry.Tag(),
self._tags[entry.Tag()], line_count)
sys.exit(1)
self._entries.append(entry)
self._tags[entry.Tag()] = entry.Name()
print >>sys.stderr, ' Added entry: %s' % entry.Name()
def Name(self):
return self._name
def EntryTagName(self, entry):
"""Creates the name inside an enumeration for distinguishing data
types."""
name = "%s_%s" % (self._name, entry.Name())
return name.upper()
def PrintIdented(self, file, ident, code):
"""Takes an array, add indentation to each entry and prints it."""
for entry in code:
print >>file, '%s%s' % (ident, entry)
def PrintTags(self, file):
"""Prints the tag definitions for a structure."""
print >>file, '/* Tag definition for %s */' % self._name
print >>file, 'enum %s_ {' % self._name.lower()
for entry in self._entries:
print >>file, ' %s=%d,' % (self.EntryTagName(entry),
entry.Tag())
print >>file, ' %s_MAX_TAGS' % (self._name.upper())
print >>file, '};\n'
def PrintForwardDeclaration(self, file):
print >>file, 'struct %s;' % self._name
def PrintDeclaration(self, file):
print >>file, '/* Structure declaration for %s */' % self._name
print >>file, 'struct %s_access_ {' % self._name
for entry in self._entries:
dcl = entry.AssignDeclaration('(*%s_assign)' % entry.Name())
dcl.extend(
entry.GetDeclaration('(*%s_get)' % entry.Name()))
if entry.Array():
dcl.extend(
entry.AddDeclaration('(*%s_add)' % entry.Name()))
self.PrintIdented(file, ' ', dcl)
print >>file, '};\n'
print >>file, 'struct %s {' % self._name
print >>file, ' struct %s_access_ *base;\n' % self._name
for entry in self._entries:
dcl = entry.Declaration()
self.PrintIdented(file, ' ', dcl)
print >>file, ''
for entry in self._entries:
print >>file, ' uint8_t %s_set;' % entry.Name()
print >>file, '};\n'
print >>file, \
"""struct %(name)s *%(name)s_new(void);
void %(name)s_free(struct %(name)s *);
void %(name)s_clear(struct %(name)s *);
void %(name)s_marshal(struct evbuffer *, const struct %(name)s *);
int %(name)s_unmarshal(struct %(name)s *, struct evbuffer *);
int %(name)s_complete(struct %(name)s *);
void evtag_marshal_%(name)s(struct evbuffer *, uint32_t,
const struct %(name)s *);
int evtag_unmarshal_%(name)s(struct evbuffer *, uint32_t,
struct %(name)s *);""" % { 'name' : self._name }
# Write a setting function of every variable
for entry in self._entries:
self.PrintIdented(file, '', entry.AssignDeclaration(
entry.AssignFuncName()))
self.PrintIdented(file, '', entry.GetDeclaration(
entry.GetFuncName()))
if entry.Array():
self.PrintIdented(file, '', entry.AddDeclaration(
entry.AddFuncName()))
print >>file, '/* --- %s done --- */\n' % self._name
def PrintCode(self, file):
print >>file, ('/*\n'
' * Implementation of %s\n'
' */\n') % self._name
print >>file, \
'static struct %(name)s_access_ __%(name)s_base = {' % \
{ 'name' : self._name }
for entry in self._entries:
self.PrintIdented(file, ' ', entry.CodeBase())
print >>file, '};\n'
# Creation
print >>file, (
'struct %(name)s *\n'
'%(name)s_new(void)\n'
'{\n'
' struct %(name)s *tmp;\n'
' if ((tmp = malloc(sizeof(struct %(name)s))) == NULL) {\n'
' event_warn("%%s: malloc", __func__);\n'
' return (NULL);\n'
' }\n'
' tmp->base = &__%(name)s_base;\n') % { 'name' : self._name }
for entry in self._entries:
self.PrintIdented(file, ' ', entry.CodeNew('tmp'))
print >>file, ' tmp->%s_set = 0;\n' % entry.Name()
print >>file, (
' return (tmp);\n'
'}\n')
# Adding
for entry in self._entries:
if entry.Array():
self.PrintIdented(file, '', entry.CodeAdd())
print >>file, ''
# Assigning
for entry in self._entries:
self.PrintIdented(file, '', entry.CodeAssign())
print >>file, ''
# Getting
for entry in self._entries:
self.PrintIdented(file, '', entry.CodeGet())
print >>file, ''
# Clearing
print >>file, ( 'void\n'
'%(name)s_clear(struct %(name)s *tmp)\n'
'{'
) % { 'name' : self._name }
for entry in self._entries:
self.PrintIdented(file, ' ', entry.CodeClear('tmp'))
print >>file, '}\n'
# Freeing
print >>file, ( 'void\n'
'%(name)s_free(struct %(name)s *tmp)\n'
'{'
) % { 'name' : self._name }
for entry in self._entries:
self.PrintIdented(file, ' ', entry.CodeFree('tmp'))
print >>file, (' free(tmp);\n'
'}\n')
# Marshaling
print >>file, ('void\n'
'%(name)s_marshal(struct evbuffer *evbuf, '
'const struct %(name)s *tmp)'
'{') % { 'name' : self._name }
for entry in self._entries:
indent = ' '
# Optional entries do not have to be set
if entry.Optional():
indent += ' '
print >>file, ' if (tmp->%s_set) {' % entry.Name()
self.PrintIdented(
file, indent,
entry.CodeMarshal('evbuf', self.EntryTagName(entry), 'tmp'))
if entry.Optional():
print >>file, ' }'
print >>file, '}\n'
# Unmarshaling
print >>file, ('int\n'
'%(name)s_unmarshal(struct %(name)s *tmp, '
' struct evbuffer *evbuf)\n'
'{\n'
' uint32_t tag;\n'
' while (EVBUFFER_LENGTH(evbuf) > 0) {\n'
' if (evtag_peek(evbuf, &tag) == -1)\n'
' return (-1);\n'
' switch (tag) {\n'
) % { 'name' : self._name }
for entry in self._entries:
print >>file, ' case %s:\n' % self.EntryTagName(entry)
if not entry.Array():
print >>file, (
' if (tmp->%s_set)\n'
' return (-1);'
) % (entry.Name())
self.PrintIdented(
file, ' ',
entry.CodeUnmarshal('evbuf',
self.EntryTagName(entry), 'tmp'))
print >>file, ( ' tmp->%s_set = 1;\n' % entry.Name() +
' break;\n' )
print >>file, ( ' default:\n'
' return -1;\n'
' }\n'
' }\n' )
# Check if it was decoded completely
print >>file, ( ' if (%(name)s_complete(tmp) == -1)\n'
' return (-1);'
) % { 'name' : self._name }
# Successfully decoded
print >>file, ( ' return (0);\n'
'}\n')
# Checking if a structure has all the required data
print >>file, (
'int\n'
'%(name)s_complete(struct %(name)s *msg)\n'
'{' ) % { 'name' : self._name }
for entry in self._entries:
self.PrintIdented(
file, ' ',
entry.CodeComplete('msg'))
print >>file, (
' return (0);\n'
'}\n' )
# Complete message unmarshaling
print >>file, (
'int\n'
'evtag_unmarshal_%(name)s(struct evbuffer *evbuf, '
'uint32_t need_tag, struct %(name)s *msg)\n'
'{\n'
' uint32_t tag;\n'
' int res = -1;\n'
'\n'
' struct evbuffer *tmp = evbuffer_new();\n'
'\n'
' if (evtag_unmarshal(evbuf, &tag, tmp) == -1'
' || tag != need_tag)\n'
' goto error;\n'
'\n'
' if (%(name)s_unmarshal(msg, tmp) == -1)\n'
' goto error;\n'
'\n'
' res = 0;\n'
'\n'
' error:\n'
' evbuffer_free(tmp);\n'
' return (res);\n'
'}\n' ) % { 'name' : self._name }
# Complete message marshaling
print >>file, (
'void\n'
'evtag_marshal_%(name)s(struct evbuffer *evbuf, uint32_t tag, '
'const struct %(name)s *msg)\n'
'{\n'
' struct evbuffer *_buf = evbuffer_new();\n'
' assert(_buf != NULL);\n'
' evbuffer_drain(_buf, -1);\n'
' %(name)s_marshal(_buf, msg);\n'
' evtag_marshal(evbuf, tag, EVBUFFER_DATA(_buf), '
'EVBUFFER_LENGTH(_buf));\n'
' evbuffer_free(_buf);\n'
'}\n' ) % { 'name' : self._name }
class Entry:
def __init__(self, type, name, tag):
self._type = type
self._name = name
self._tag = int(tag)
self._ctype = type
self._optional = 0
self._can_be_array = 0
self._array = 0
self._line_count = -1
self._struct = None
self._refname = None
def GetTranslation(self):
return { "parent_name" : self._struct.Name(),
"name" : self._name,
"ctype" : self._ctype,
"refname" : self._refname
}
def SetStruct(self, struct):
self._struct = struct
def LineCount(self):
assert self._line_count != -1
return self._line_count
def SetLineCount(self, number):
self._line_count = number
def Array(self):
return self._array
def Optional(self):
return self._optional
def Tag(self):
return self._tag
def Name(self):
return self._name
def Type(self):
return self._type
def MakeArray(self, yes=1):
self._array = yes
def MakeOptional(self):
self._optional = 1
def GetFuncName(self):
return '%s_%s_get' % (self._struct.Name(), self._name)
def GetDeclaration(self, funcname):
code = [ 'int %s(struct %s *, %s *);' % (
funcname, self._struct.Name(), self._ctype ) ]
return code
def CodeGet(self):
code = (
'int',
'%(parent_name)s_%(name)s_get(struct %(parent_name)s *msg, '
'%(ctype)s *value)',
'{',
' if (msg->%(name)s_set != 1)',
' return (-1);',
' *value = msg->%(name)s_data;',
' return (0);',
'}' )
code = '\n'.join(code)
code = code % self.GetTranslation()
return code.split('\n')
def AssignFuncName(self):
return '%s_%s_assign' % (self._struct.Name(), self._name)
def AddFuncName(self):
return '%s_%s_add' % (self._struct.Name(), self._name)
def AssignDeclaration(self, funcname):
code = [ 'int %s(struct %s *, const %s);' % (
funcname, self._struct.Name(), self._ctype ) ]
return code
def CodeAssign(self):
code = [ 'int',
'%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg,'
' const %(ctype)s value)',
'{',
' msg->%(name)s_set = 1;',
' msg->%(name)s_data = value;',
' return (0);',
'}' ]
code = '\n'.join(code)
code = code % self.GetTranslation()
return code.split('\n')
def CodeClear(self, structname):
code = [ '%s->%s_set = 0;' % (structname, self.Name()) ]
return code
def CodeComplete(self, structname):
if self.Optional():
return []
code = [ 'if (!%s->%s_set)' % (structname, self.Name()),
' return (-1);' ]
return code
def CodeFree(self, name):
return []
def CodeBase(self):
code = [
'%(parent_name)s_%(name)s_assign,',
'%(parent_name)s_%(name)s_get,'
]
if self.Array():
code.append('%(parent_name)s_%(name)s_add,')
code = '\n'.join(code)
code = code % self.GetTranslation()
return code.split('\n')
def Verify(self):
if self.Array() and not self._can_be_array:
print >>sys.stderr, (
'Entry "%s" cannot be created as an array '
'around line %d' ) % (self._name, self.LineCount())
sys.exit(1)
if not self._struct:
print >>sys.stderr, (
'Entry "%s" does not know which struct it belongs to '
'around line %d' ) % (self._name, self.LineCount())
sys.exit(1)
if self._optional and self._array:
print >>sys.stderr, ( 'Entry "%s" has illegal combination of '
'optional and array around line %d' ) % (
self._name, self.LineCount() )
sys.exit(1)
class EntryBytes(Entry):
def __init__(self, type, name, tag, length):
# Init base class
Entry.__init__(self, type, name, tag)
self._length = length
self._ctype = 'uint8_t'
def GetDeclaration(self, funcname):
code = [ 'int %s(struct %s *, %s **);' % (
funcname, self._struct.Name(), self._ctype ) ]
return code
def AssignDeclaration(self, funcname):
code = [ 'int %s(struct %s *, const %s *);' % (
funcname, self._struct.Name(), self._ctype ) ]
return code
def Declaration(self):
dcl = ['uint8_t %s_data[%s];' % (self._name, self._length)]
return dcl
def CodeGet(self):
name = self._name
code = [ 'int',
'%s_%s_get(struct %s *msg, %s **value)' % (
self._struct.Name(), name,
self._struct.Name(), self._ctype),
'{',
' if (msg->%s_set != 1)' % name,
' return (-1);',
' *value = msg->%s_data;' % name,
' return (0);',
'}' ]
return code
def CodeAssign(self):
name = self._name
code = [ 'int',
'%s_%s_assign(struct %s *msg, const %s *value)' % (
self._struct.Name(), name,
self._struct.Name(), self._ctype),
'{',
' msg->%s_set = 1;' % name,
' memcpy(msg->%s_data, value, %s);' % (
name, self._length),
' return (0);',
'}' ]
return code
def CodeUnmarshal(self, buf, tag_name, var_name):
code = [ 'if (evtag_unmarshal_fixed(%s, %s, ' % (buf, tag_name) +
'%s->%s_data, ' % (var_name, self._name) +
'sizeof(%s->%s_data)) == -1) {' % (
var_name, self._name),
' event_warnx("%%s: failed to unmarshal %s", __func__);' % (
self._name ),
' return (-1);',
'}'
]
return code
def CodeMarshal(self, buf, tag_name, var_name):
code = ['evtag_marshal(%s, %s, %s->%s_data, sizeof(%s->%s_data));' % (
buf, tag_name, var_name, self._name, var_name, self._name )]
return code
def CodeClear(self, structname):
code = [ '%s->%s_set = 0;' % (structname, self.Name()),
'memset(%s->%s_data, 0, sizeof(%s->%s_data));' % (
structname, self._name, structname, self._name)]
return code
def CodeNew(self, name):
code = ['memset(%s->%s_data, 0, sizeof(%s->%s_data));' % (
name, self._name, name, self._name)]
return code
def Verify(self):
if not self._length:
print >>sys.stderr, 'Entry "%s" needs a length around line %d' % (
self._name, self.LineCount() )
sys.exit(1)
Entry.Verify(self)
class EntryInt(Entry):
def __init__(self, type, name, tag):
# Init base class
Entry.__init__(self, type, name, tag)
self._ctype = 'uint32_t'
def CodeUnmarshal(self, buf, tag_name, var_name):
code = ['if (evtag_unmarshal_int(%s, %s, &%s->%s_data) == -1) {' % (
buf, tag_name, var_name, self._name),
' event_warnx("%%s: failed to unmarshal %s", __func__);' % (
self._name ),
' return (-1);',
'}' ]
return code
def CodeMarshal(self, buf, tag_name, var_name):
code = ['evtag_marshal_int(%s, %s, %s->%s_data);' % (
buf, tag_name, var_name, self._name)]
return code
def Declaration(self):
dcl = ['uint32_t %s_data;' % self._name]
return dcl
def CodeNew(self, name):
code = ['%s->%s_data = 0;' % (name, self._name)]
return code
class EntryString(Entry):
def __init__(self, type, name, tag):
# Init base class
Entry.__init__(self, type, name, tag)
self._ctype = 'char *'
def CodeAssign(self):
name = self._name
code = """int
%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg,
const %(ctype)s value)
{
if (msg->%(name)s_data != NULL)
free(msg->%(name)s_data);
if ((msg->%(name)s_data = strdup(value)) == NULL)
return (-1);
msg->%(name)s_set = 1;
return (0);
}""" % self.GetTranslation()
return code.split('\n')
def CodeUnmarshal(self, buf, tag_name, var_name):
code = ['if (evtag_unmarshal_string(%s, %s, &%s->%s_data) == -1) {' % (
buf, tag_name, var_name, self._name),
' event_warnx("%%s: failed to unmarshal %s", __func__);' % (
self._name ),
' return (-1);',
'}'
]
return code
def CodeMarshal(self, buf, tag_name, var_name):
code = ['evtag_marshal_string(%s, %s, %s->%s_data);' % (
buf, tag_name, var_name, self._name)]
return code
def CodeClear(self, structname):
code = [ 'if (%s->%s_set == 1) {' % (structname, self.Name()),
' free (%s->%s_data);' % (structname, self.Name()),
' %s->%s_data = NULL;' % (structname, self.Name()),
' %s->%s_set = 0;' % (structname, self.Name()),
'}'
]
return code
def CodeNew(self, name):
code = ['%s->%s_data = NULL;' % (name, self._name)]
return code
def CodeFree(self, name):
code = ['if (%s->%s_data != NULL)' % (name, self._name),
' free (%s->%s_data); ' % (name, self._name)]
return code
def Declaration(self):
dcl = ['char *%s_data;' % self._name]
return dcl
class EntryStruct(Entry):
def __init__(self, type, name, tag, refname):
# Init base class
Entry.__init__(self, type, name, tag)
self._can_be_array = 1
self._refname = refname
self._ctype = 'struct %s*' % refname
def CodeGet(self):
name = self._name
code = [ 'int',
'%s_%s_get(struct %s *msg, %s *value)' % (
self._struct.Name(), name,
self._struct.Name(), self._ctype),
'{',
' if (msg->%s_set != 1) {' % name,
' msg->%s_data = %s_new();' % (name, self._refname),
' if (msg->%s_data == NULL)' % name,
' return (-1);',
' msg->%s_set = 1;' % name,
' }',
' *value = msg->%s_data;' % name,
' return (0);',
'}' ]
return code
def CodeAssign(self):
name = self._name
code = """int
%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg,
const %(ctype)s value)
{
struct evbuffer *tmp = NULL;
if (msg->%(name)s_set) {
%(refname)s_clear(msg->%(name)s_data);
msg->%(name)s_set = 0;
} else {
msg->%(name)s_data = %(refname)s_new();
if (msg->%(name)s_data == NULL) {
event_warn("%%s: %(refname)s_new()", __func__);
goto error;
}
}
if ((tmp = evbuffer_new()) == NULL) {
event_warn("%%s: evbuffer_new()", __func__);
goto error;
}
%(refname)s_marshal(tmp, value);
if (%(refname)s_unmarshal(msg->%(name)s_data, tmp) == -1) {
event_warnx("%%s: %(refname)s_unmarshal", __func__);
goto error;
}
msg->%(name)s_set = 1;
evbuffer_free(tmp);
return (0);
error:
if (tmp != NULL)
evbuffer_free(tmp);
if (msg->%(name)s_data != NULL) {
%(refname)s_free(msg->%(name)s_data);
msg->%(name)s_data = NULL;
}
return (-1);
}""" % self.GetTranslation()
return code.split('\n')
def CodeComplete(self, structname):
if self.Optional():
code = [ 'if (%s->%s_set && %s_complete(%s->%s_data) == -1)' % (
structname, self.Name(),
self._refname, structname, self.Name()),
' return (-1);' ]
else:
code = [ 'if (%s_complete(%s->%s_data) == -1)' % (
self._refname, structname, self.Name()),
' return (-1);' ]
return code
def CodeUnmarshal(self, buf, tag_name, var_name):
code = ['%s->%s_data = %s_new();' % (
var_name, self._name, self._refname),
'if (%s->%s_data == NULL)' % (var_name, self._name),
' return (-1);',
'if (evtag_unmarshal_%s(%s, %s, %s->%s_data) == -1) {' % (
self._refname, buf, tag_name, var_name, self._name),
' event_warnx("%%s: failed to unmarshal %s", __func__);' % (
self._name ),
' return (-1);',
'}'
]
return code
def CodeMarshal(self, buf, tag_name, var_name):
code = ['evtag_marshal_%s(%s, %s, %s->%s_data);' % (
self._refname, buf, tag_name, var_name, self._name)]
return code
def CodeClear(self, structname):
code = [ 'if (%s->%s_set == 1) {' % (structname, self.Name()),
' %s_free(%s->%s_data);' % (
self._refname, structname, self.Name()),
' %s->%s_data = NULL;' % (structname, self.Name()),
' %s->%s_set = 0;' % (structname, self.Name()),
'}'
]
return code
def CodeNew(self, name):
code = ['%s->%s_data = NULL;' % (name, self._name)]
return code
def CodeFree(self, name):
code = ['if (%s->%s_data != NULL)' % (name, self._name),
' %s_free(%s->%s_data); ' % (
self._refname, name, self._name)]
return code
def Declaration(self):
dcl = ['%s %s_data;' % (self._ctype, self._name)]
return dcl
class EntryVarBytes(Entry):
def __init__(self, type, name, tag):
# Init base class
Entry.__init__(self, type, name, tag)
self._ctype = 'uint8_t *'
def GetDeclaration(self, funcname):
code = [ 'int %s(struct %s *, %s *, uint32_t *);' % (
funcname, self._struct.Name(), self._ctype ) ]
return code
def AssignDeclaration(self, funcname):
code = [ 'int %s(struct %s *, const %s, uint32_t);' % (
funcname, self._struct.Name(), self._ctype ) ]
return code
def CodeAssign(self):
name = self._name
code = [ 'int',
'%s_%s_assign(struct %s *msg, '
'const %s value, uint32_t len)' % (
self._struct.Name(), name,
self._struct.Name(), self._ctype),
'{',
' if (msg->%s_data != NULL)' % name,
' free (msg->%s_data);' % name,
' msg->%s_data = malloc(len);' % name,
' if (msg->%s_data == NULL)' % name,
' return (-1);',
' msg->%s_set = 1;' % name,
' msg->%s_length = len;' % name,
' memcpy(msg->%s_data, value, len);' % name,
' return (0);',
'}' ]
return code
def CodeGet(self):
name = self._name
code = [ 'int',
'%s_%s_get(struct %s *msg, %s *value, uint32_t *plen)' % (
self._struct.Name(), name,
self._struct.Name(), self._ctype),
'{',
' if (msg->%s_set != 1)' % name,
' return (-1);',
' *value = msg->%s_data;' % name,
' *plen = msg->%s_length;' % name,
' return (0);',
'}' ]
return code
def CodeUnmarshal(self, buf, tag_name, var_name):
code = ['if (evtag_payload_length(%s, &%s->%s_length) == -1)' % (
buf, var_name, self._name),
' return (-1);',
# We do not want DoS opportunities
'if (%s->%s_length > EVBUFFER_LENGTH(%s))' % (
var_name, self._name, buf),
' return (-1);',
'if ((%s->%s_data = malloc(%s->%s_length)) == NULL)' % (
var_name, self._name, var_name, self._name),
' return (-1);',
'if (evtag_unmarshal_fixed(%s, %s, %s->%s_data, '
'%s->%s_length) == -1) {' % (
buf, tag_name, var_name, self._name, var_name, self._name),
' event_warnx("%%s: failed to unmarshal %s", __func__);' % (
self._name ),
' return (-1);',
'}'
]
return code
def CodeMarshal(self, buf, tag_name, var_name):
code = ['evtag_marshal(%s, %s, %s->%s_data, %s->%s_length);' % (
buf, tag_name, var_name, self._name, var_name, self._name)]
return code
def CodeClear(self, structname):
code = [ 'if (%s->%s_set == 1) {' % (structname, self.Name()),
' free (%s->%s_data);' % (structname, self.Name()),
' %s->%s_data = NULL;' % (structname, self.Name()),
' %s->%s_length = 0;' % (structname, self.Name()),
' %s->%s_set = 0;' % (structname, self.Name()),
'}'
]
return code
def CodeNew(self, name):
code = ['%s->%s_data = NULL;' % (name, self._name),
'%s->%s_length = 0;' % (name, self._name) ]
return code
def CodeFree(self, name):
code = ['if (%s->%s_data != NULL)' % (name, self._name),
' free (%s->%s_data); ' % (name, self._name)]
return code
def Declaration(self):
dcl = ['uint8_t *%s_data;' % self._name,
'uint32_t %s_length;' % self._name]
return dcl
class EntryArray(Entry):
def __init__(self, entry):
# Init base class
Entry.__init__(self, entry._type, entry._name, entry._tag)
self._entry = entry
self._refname = entry._refname
self._ctype = 'struct %s *' % self._refname
def GetDeclaration(self, funcname):
"""Allows direct access to elements of the array."""
translate = self.GetTranslation()
translate["funcname"] = funcname
code = [
'int %(funcname)s(struct %(parent_name)s *, int, %(ctype)s *);' %
translate ]
return code
def AssignDeclaration(self, funcname):
code = [ 'int %s(struct %s *, int, const %s);' % (
funcname, self._struct.Name(), self._ctype ) ]
return code
def AddDeclaration(self, funcname):
code = [ '%s %s(struct %s *);' % (
self._ctype, funcname, self._struct.Name() ) ]
return code
def CodeGet(self):
code = """int
%(parent_name)s_%(name)s_get(struct %(parent_name)s *msg, int offset,
%(ctype)s *value)
{
if (!msg->%(name)s_set || offset < 0 || offset >= msg->%(name)s_length)
return (-1);
*value = msg->%(name)s_data[offset];
return (0);
}""" % self.GetTranslation()
return code.split('\n')
def CodeAssign(self):
code = """int
%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg, int off,
const %(ctype)s value)
{
struct evbuffer *tmp = NULL;
if (!msg->%(name)s_set || off < 0 || off >= msg->%(name)s_length)
return (-1);
%(refname)s_clear(msg->%(name)s_data[off]);
if ((tmp = evbuffer_new()) == NULL) {
event_warn("%%s: evbuffer_new()", __func__);
goto error;
}
%(refname)s_marshal(tmp, value);
if (%(refname)s_unmarshal(msg->%(name)s_data[off], tmp) == -1) {
event_warnx("%%s: %(refname)s_unmarshal", __func__);
goto error;
}
evbuffer_free(tmp);
return (0);
error:
if (tmp != NULL)
evbuffer_free(tmp);
%(refname)s_clear(msg->%(name)s_data[off]);
return (-1);
}""" % self.GetTranslation()
return code.split('\n')
def CodeAdd(self):
code = \
"""%(ctype)s
%(parent_name)s_%(name)s_add(struct %(parent_name)s *msg)
{
if (++msg->%(name)s_length >= msg->%(name)s_num_allocated) {
int tobe_allocated = msg->%(name)s_num_allocated;
%(ctype)s* new_data = NULL;
tobe_allocated = !tobe_allocated ? 1 : tobe_allocated << 1;
new_data = (%(ctype)s*) realloc(msg->%(name)s_data,
tobe_allocated * sizeof(%(ctype)s));
if (new_data == NULL)
goto error;
msg->%(name)s_data = new_data;
msg->%(name)s_num_allocated = tobe_allocated;
}
msg->%(name)s_data[msg->%(name)s_length - 1] = %(refname)s_new();
if (msg->%(name)s_data[msg->%(name)s_length - 1] == NULL)
goto error;
msg->%(name)s_set = 1;
return (msg->%(name)s_data[msg->%(name)s_length - 1]);
error:
--msg->%(name)s_length;
return (NULL);
}
""" % self.GetTranslation()
return code.split('\n')
def CodeComplete(self, structname):
code = []
translate = self.GetTranslation()
if self.Optional():
code.append( 'if (%(structname)s->%(name)s_set)' % translate)
translate["structname"] = structname
tmp = """{
int i;
for (i = 0; i < %(structname)s->%(name)s_length; ++i) {
if (%(refname)s_complete(%(structname)s->%(name)s_data[i]) == -1)
return (-1);
}
}""" % translate
code.extend(tmp.split('\n'))
return code
def CodeUnmarshal(self, buf, tag_name, var_name):
translate = self.GetTranslation()
translate["var_name"] = var_name
translate["buf"] = buf
translate["tag_name"] = tag_name
code = """if (%(parent_name)s_%(name)s_add(%(var_name)s) == NULL)
return (-1);
if (evtag_unmarshal_%(refname)s(%(buf)s, %(tag_name)s,
%(var_name)s->%(name)s_data[%(var_name)s->%(name)s_length - 1]) == -1) {
--%(var_name)s->%(name)s_length;
event_warnx("%%s: failed to unmarshal %(name)s", __func__);
return (-1);
}""" % translate
return code.split('\n')
def CodeMarshal(self, buf, tag_name, var_name):
code = ['{',
' int i;',
' for (i = 0; i < %s->%s_length; ++i) {' % (
var_name, self._name),
' evtag_marshal_%s(%s, %s, %s->%s_data[i]);' % (
self._refname, buf, tag_name, var_name, self._name),
' }',
'}'
]
return code
def CodeClear(self, structname):
code = [ 'if (%s->%s_set == 1) {' % (structname, self.Name()),
' int i;',
' for (i = 0; i < %s->%s_length; ++i) {' % (
structname, self.Name()),
' %s_free(%s->%s_data[i]);' % (
self._refname, structname, self.Name()),
' }',
' free(%s->%s_data);' % (structname, self.Name()),
' %s->%s_data = NULL;' % (structname, self.Name()),
' %s->%s_set = 0;' % (structname, self.Name()),
' %s->%s_length = 0;' % (structname, self.Name()),
' %s->%s_num_allocated = 0;' % (structname, self.Name()),
'}'
]
return code
def CodeNew(self, name):
code = ['%s->%s_data = NULL;' % (name, self._name),
'%s->%s_length = 0;' % (name, self._name),
'%s->%s_num_allocated = 0;' % (name, self._name)]
return code
def CodeFree(self, name):
code = ['if (%s->%s_data != NULL) {' % (name, self._name),
' int i;',
' for (i = 0; i < %s->%s_length; ++i) {' % (
name, self._name),
' %s_free(%s->%s_data[i]); ' % (
self._refname, name, self._name),
' %s->%s_data[i] = NULL;' % (name, self._name),
' }',
' free(%s->%s_data);' % (name, self._name),
' %s->%s_data = NULL;' % (name, self._name),
' %s->%s_length = 0;' % (name, self._name),
' %s->%s_num_allocated = 0;' % (name, self._name),
'}'
]
return code
def Declaration(self):
dcl = ['struct %s **%s_data;' % (self._refname, self._name),
'int %s_length;' % self._name,
'int %s_num_allocated;' % self._name ]
return dcl
def NormalizeLine(line):
global white
global cppcomment
line = cppcomment.sub('', line)
line = line.strip()
line = white.sub(' ', line)
return line
def ProcessOneEntry(newstruct, entry):
optional = 0
array = 0
entry_type = ''
name = ''
tag = ''
tag_set = None
separator = ''
fixed_length = ''
tokens = entry.split(' ')
while tokens:
token = tokens[0]
tokens = tokens[1:]
if not entry_type:
if not optional and token == 'optional':
optional = 1
continue
if not array and token == 'array':
array = 1
continue
if not entry_type:
entry_type = token
continue
if not name:
res = re.match(r'^([^\[\]]+)(\[.*\])?$', token)
if not res:
print >>sys.stderr, 'Cannot parse name: \"%s\" around %d' % (
entry, line_count)
sys.exit(1)
name = res.group(1)
fixed_length = res.group(2)
if fixed_length:
fixed_length = fixed_length[1:-1]
continue
if not separator:
separator = token
if separator != '=':
print >>sys.stderr, 'Expected "=" after name \"%s\" got %s' % (
name, token)
sys.exit(1)
continue
if not tag_set:
tag_set = 1
if not re.match(r'^(0x)?[0-9]+$', token):
print >>sys.stderr, 'Expected tag number: \"%s\"' % entry
sys.exit(1)
tag = int(token, 0)
continue
print >>sys.stderr, 'Cannot parse \"%s\"' % entry
sys.exit(1)
if not tag_set:
print >>sys.stderr, 'Need tag number: \"%s\"' % entry
sys.exit(1)
# Create the right entry
if entry_type == 'bytes':
if fixed_length:
newentry = EntryBytes(entry_type, name, tag, fixed_length)
else:
newentry = EntryVarBytes(entry_type, name, tag)
elif entry_type == 'int' and not fixed_length:
newentry = EntryInt(entry_type, name, tag)
elif entry_type == 'string' and not fixed_length:
newentry = EntryString(entry_type, name, tag)
else:
res = re.match(r'^struct\[(%s)\]$' % _STRUCT_RE,
entry_type, re.IGNORECASE)
if res:
# References another struct defined in our file
newentry = EntryStruct(entry_type, name, tag, res.group(1))
else:
print >>sys.stderr, 'Bad type: "%s" in "%s"' % (entry_type, entry)
sys.exit(1)
structs = []
if optional:
newentry.MakeOptional()
if array:
newentry.MakeArray()
newentry.SetStruct(newstruct)
newentry.SetLineCount(line_count)
newentry.Verify()
if array:
# We need to encapsulate this entry into a struct
newname = newentry.Name()+ '_array'
# Now borgify the new entry.
newentry = EntryArray(newentry)
newentry.SetStruct(newstruct)
newentry.SetLineCount(line_count)
newentry.MakeArray()
newstruct.AddEntry(newentry)
return structs
def ProcessStruct(data):
tokens = data.split(' ')
# First three tokens are: 'struct' 'name' '{'
newstruct = Struct(tokens[1])
inside = ' '.join(tokens[3:-1])
tokens = inside.split(';')
structs = []
for entry in tokens:
entry = NormalizeLine(entry)
if not entry:
continue
# It's possible that new structs get defined in here
structs.extend(ProcessOneEntry(newstruct, entry))
structs.append(newstruct)
return structs
def GetNextStruct(file):
global line_count
global cppdirect
got_struct = 0
processed_lines = []
have_c_comment = 0
data = ''
while 1:
line = file.readline()
if not line:
break
line_count += 1
line = line[:-1]
if not have_c_comment and re.search(r'/\*', line):
if re.search(r'/\*.*\*/', line):
line = re.sub(r'/\*.*\*/', '', line)
else:
line = re.sub(r'/\*.*$', '', line)
have_c_comment = 1
if have_c_comment:
if not re.search(r'\*/', line):
continue
have_c_comment = 0
line = re.sub(r'^.*\*/', '', line)
line = NormalizeLine(line)
if not line:
continue
if not got_struct:
if re.match(r'#include ["<].*[>"]', line):
cppdirect.append(line)
continue
if re.match(r'^#(if( |def)|endif)', line):
cppdirect.append(line)
continue
if re.match(r'^#define', line):
headerdirect.append(line)
continue
if not re.match(r'^struct %s {$' % _STRUCT_RE,
line, re.IGNORECASE):
print >>sys.stderr, 'Missing struct on line %d: %s' % (
line_count, line)
sys.exit(1)
else:
got_struct = 1
data += line
continue
# We are inside the struct
tokens = line.split('}')
if len(tokens) == 1:
data += ' ' + line
continue
if len(tokens[1]):
print >>sys.stderr, 'Trailing garbage after struct on line %d' % (
line_count )
sys.exit(1)
# We found the end of the struct
data += ' %s}' % tokens[0]
break
# Remove any comments, that might be in there
data = re.sub(r'/\*.*\*/', '', data)
return data
def Parse(file):
"""
Parses the input file and returns C code and corresponding header file.
"""
entities = []
while 1:
# Just gets the whole struct nicely formatted
data = GetNextStruct(file)
if not data:
break
entities.extend(ProcessStruct(data))
return entities
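# Illustrative input sketch (struct name, field names, and tag numbers are
# made up) showing the grammar ProcessOneEntry accepts: an optional
# 'optional'/'array' qualifier, a type (bytes/int/string/struct[ref]), a
# name with an optional fixed length, and an '=' followed by a tag number:
#
#   struct msg {
#       string from_name = 1;
#       optional int attack = 2;
#       bytes digest[24] = 3;
#       array struct[run] runs = 4;
#   }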
def GuardName(name):
name = '_'.join(name.split('.'))
name = '_'.join(name.split('/'))
guard = '_'+name.upper()+'_'
return guard
def HeaderPreamble(name):
guard = GuardName(name)
pre = (
'/*\n'
' * Automatically generated from %s\n'
' */\n\n'
'#ifndef %s\n'
'#define %s\n\n' ) % (
name, guard, guard)
# insert stdint.h - let's hope everyone has it
pre += (
'#include <event-config.h>\n'
'#ifdef _EVENT_HAVE_STDINT_H\n'
'#include <stdint.h>\n'
'#endif\n' )
for statement in headerdirect:
pre += '%s\n' % statement
if headerdirect:
pre += '\n'
pre += (
'#define EVTAG_HAS(msg, member) ((msg)->member##_set == 1)\n'
'#ifdef __GNUC__\n'
'#define EVTAG_ASSIGN(msg, member, args...) '
'(*(msg)->base->member##_assign)(msg, ## args)\n'
'#define EVTAG_GET(msg, member, args...) '
'(*(msg)->base->member##_get)(msg, ## args)\n'
'#else\n'
'#define EVTAG_ASSIGN(msg, member, ...) '
'(*(msg)->base->member##_assign)(msg, ## __VA_ARGS__)\n'
'#define EVTAG_GET(msg, member, ...) '
'(*(msg)->base->member##_get)(msg, ## __VA_ARGS__)\n'
'#endif\n'
'#define EVTAG_ADD(msg, member) (*(msg)->base->member##_add)(msg)\n'
'#define EVTAG_LEN(msg, member) ((msg)->member##_length)\n'
)
return pre
def HeaderPostamble(name):
guard = GuardName(name)
return '#endif /* %s */' % guard
def BodyPreamble(name):
global _NAME
global _VERSION
header_file = '.'.join(name.split('.')[:-1]) + '.gen.h'
pre = ( '/*\n'
' * Automatically generated from %s\n'
' * by %s/%s. DO NOT EDIT THIS FILE.\n'
' */\n\n' ) % (name, _NAME, _VERSION)
pre += ( '#include <sys/types.h>\n'
'#include <sys/time.h>\n'
'#include <stdlib.h>\n'
'#include <string.h>\n'
'#include <assert.h>\n'
'#include <event.h>\n\n' )
for statement in cppdirect:
pre += '%s\n' % statement
pre += '\n#include "%s"\n\n' % header_file
pre += 'void event_err(int eval, const char *fmt, ...);\n'
pre += 'void event_warn(const char *fmt, ...);\n'
pre += 'void event_errx(int eval, const char *fmt, ...);\n'
pre += 'void event_warnx(const char *fmt, ...);\n\n'
return pre
def main(argv):
if len(argv) < 2 or not argv[1]:
print >>sys.stderr, 'Need RPC description file as first argument.'
sys.exit(1)
filename = argv[1]
ext = filename.split('.')[-1]
if ext != 'rpc':
print >>sys.stderr, 'Unrecognized file extension: %s' % ext
sys.exit(1)
print >>sys.stderr, 'Reading \"%s\"' % filename
fp = open(filename, 'r')
entities = Parse(fp)
fp.close()
header_file = '.'.join(filename.split('.')[:-1]) + '.gen.h'
impl_file = '.'.join(filename.split('.')[:-1]) + '.gen.c'
print >>sys.stderr, '... creating "%s"' % header_file
header_fp = open(header_file, 'w')
print >>header_fp, HeaderPreamble(filename)
# Create forward declarations: allows other structs to reference
# each other
for entry in entities:
entry.PrintForwardDeclaration(header_fp)
print >>header_fp, ''
for entry in entities:
entry.PrintTags(header_fp)
entry.PrintDeclaration(header_fp)
print >>header_fp, HeaderPostamble(filename)
header_fp.close()
print >>sys.stderr, '... creating "%s"' % impl_file
impl_fp = open(impl_file, 'w')
print >>impl_fp, BodyPreamble(filename)
for entry in entities:
entry.PrintCode(impl_fp)
impl_fp.close()
if __name__ == '__main__':
main(sys.argv)
| gpl-2.0 |
shadanan/hevante-points | src/requests/packages/chardet/escprober.py | 2936 | 3187 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .escsm import (HZSMModel, ISO2022CNSMModel, ISO2022JPSMModel,
ISO2022KRSMModel)
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .compat import wrap_ord
class EscCharSetProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mCodingSM = [
CodingStateMachine(HZSMModel),
CodingStateMachine(ISO2022CNSMModel),
CodingStateMachine(ISO2022JPSMModel),
CodingStateMachine(ISO2022KRSMModel)
]
self.reset()
def reset(self):
CharSetProber.reset(self)
for codingSM in self._mCodingSM:
if not codingSM:
continue
codingSM.active = True
codingSM.reset()
self._mActiveSM = len(self._mCodingSM)
self._mDetectedCharset = None
def get_charset_name(self):
return self._mDetectedCharset
def get_confidence(self):
if self._mDetectedCharset:
return 0.99
else:
return 0.00
def feed(self, aBuf):
for c in aBuf:
# PY3K: aBuf is a byte array, so c is an int, not a byte
for codingSM in self._mCodingSM:
if not codingSM:
continue
if not codingSM.active:
continue
codingState = codingSM.next_state(wrap_ord(c))
if codingState == constants.eError:
codingSM.active = False
self._mActiveSM -= 1
if self._mActiveSM <= 0:
self._mState = constants.eNotMe
return self.get_state()
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
self._mDetectedCharset = codingSM.get_coding_state_machine() # nopep8
return self.get_state()
return self.get_state()
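# Illustrative standalone sketch (in practice this prober is driven by
# chardet's UniversalDetector rather than used directly; the bytes below are
# an ISO-2022-JP fragment chosen for demonstration):
#
#   prober = EscCharSetProber()
#   prober.feed(b'\x1b$B%F%9%H\x1b(B')
#   if prober.get_confidence() > 0.5:
#       print(prober.get_charset_name())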
| mit |
loansindi/linux | tools/perf/scripts/python/export-to-postgresql.py | 617 | 16128 | # export-to-postgresql.py: export perf data to a postgresql database
# Copyright (c) 2014, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
import os
import sys
import struct
import datetime
from PySide.QtSql import *
# Need to access PostgreSQL C library directly to use COPY FROM STDIN
from ctypes import *
libpq = CDLL("libpq.so.5")
PQconnectdb = libpq.PQconnectdb
PQconnectdb.restype = c_void_p
PQfinish = libpq.PQfinish
PQstatus = libpq.PQstatus
PQexec = libpq.PQexec
PQexec.restype = c_void_p
PQresultStatus = libpq.PQresultStatus
PQputCopyData = libpq.PQputCopyData
PQputCopyData.argtypes = [ c_void_p, c_void_p, c_int ]
PQputCopyEnd = libpq.PQputCopyEnd
PQputCopyEnd.argtypes = [ c_void_p, c_void_p ]
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
# These perf imports are not used at present
#from perf_trace_context import *
#from Core import *
perf_db_export_mode = True
perf_db_export_calls = False
def usage():
print >> sys.stderr, "Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>]"
print >> sys.stderr, "where: columns 'all' or 'branches'"
print >> sys.stderr, " calls 'calls' => create calls table"
raise Exception("Too few arguments")
if (len(sys.argv) < 2):
usage()
dbname = sys.argv[1]
if (len(sys.argv) >= 3):
columns = sys.argv[2]
else:
columns = "all"
if columns not in ("all", "branches"):
usage()
branches = (columns == "branches")
if (len(sys.argv) >= 4):
if (sys.argv[3] == "calls"):
perf_db_export_calls = True
else:
usage()
output_dir_name = os.getcwd() + "/" + dbname + "-perf-data"
os.mkdir(output_dir_name)
def do_query(q, s):
if (q.exec_(s)):
return
raise Exception("Query failed: " + q.lastError().text())
print datetime.datetime.today(), "Creating database..."
db = QSqlDatabase.addDatabase('QPSQL')
query = QSqlQuery(db)
db.setDatabaseName('postgres')
db.open()
try:
do_query(query, 'CREATE DATABASE ' + dbname)
except:
os.rmdir(output_dir_name)
raise
query.finish()
query.clear()
db.close()
db.setDatabaseName(dbname)
db.open()
query = QSqlQuery(db)
do_query(query, 'SET client_min_messages TO WARNING')
do_query(query, 'CREATE TABLE selected_events ('
'id bigint NOT NULL,'
'name varchar(80))')
do_query(query, 'CREATE TABLE machines ('
'id bigint NOT NULL,'
'pid integer,'
'root_dir varchar(4096))')
do_query(query, 'CREATE TABLE threads ('
'id bigint NOT NULL,'
'machine_id bigint,'
'process_id bigint,'
'pid integer,'
'tid integer)')
do_query(query, 'CREATE TABLE comms ('
'id bigint NOT NULL,'
'comm varchar(16))')
do_query(query, 'CREATE TABLE comm_threads ('
'id bigint NOT NULL,'
'comm_id bigint,'
'thread_id bigint)')
do_query(query, 'CREATE TABLE dsos ('
'id bigint NOT NULL,'
'machine_id bigint,'
'short_name varchar(256),'
'long_name varchar(4096),'
'build_id varchar(64))')
do_query(query, 'CREATE TABLE symbols ('
'id bigint NOT NULL,'
'dso_id bigint,'
'sym_start bigint,'
'sym_end bigint,'
'binding integer,'
'name varchar(2048))')
do_query(query, 'CREATE TABLE branch_types ('
'id integer NOT NULL,'
'name varchar(80))')
if branches:
do_query(query, 'CREATE TABLE samples ('
'id bigint NOT NULL,'
'evsel_id bigint,'
'machine_id bigint,'
'thread_id bigint,'
'comm_id bigint,'
'dso_id bigint,'
'symbol_id bigint,'
'sym_offset bigint,'
'ip bigint,'
'time bigint,'
'cpu integer,'
'to_dso_id bigint,'
'to_symbol_id bigint,'
'to_sym_offset bigint,'
'to_ip bigint,'
'branch_type integer,'
'in_tx boolean)')
else:
do_query(query, 'CREATE TABLE samples ('
'id bigint NOT NULL,'
'evsel_id bigint,'
'machine_id bigint,'
'thread_id bigint,'
'comm_id bigint,'
'dso_id bigint,'
'symbol_id bigint,'
'sym_offset bigint,'
'ip bigint,'
'time bigint,'
'cpu integer,'
'to_dso_id bigint,'
'to_symbol_id bigint,'
'to_sym_offset bigint,'
'to_ip bigint,'
'period bigint,'
'weight bigint,'
'transaction bigint,'
'data_src bigint,'
'branch_type integer,'
'in_tx boolean)')
if perf_db_export_calls:
do_query(query, 'CREATE TABLE call_paths ('
'id bigint NOT NULL,'
'parent_id bigint,'
'symbol_id bigint,'
'ip bigint)')
do_query(query, 'CREATE TABLE calls ('
'id bigint NOT NULL,'
'thread_id bigint,'
'comm_id bigint,'
'call_path_id bigint,'
'call_time bigint,'
'return_time bigint,'
'branch_count bigint,'
'call_id bigint,'
'return_id bigint,'
'parent_call_path_id bigint,'
'flags integer)')
do_query(query, 'CREATE VIEW samples_view AS '
'SELECT '
'id,'
'time,'
'cpu,'
'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
'(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
'(SELECT name FROM selected_events WHERE id = evsel_id) AS event,'
'to_hex(ip) AS ip_hex,'
'(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
'sym_offset,'
'(SELECT short_name FROM dsos WHERE id = dso_id) AS dso_short_name,'
'to_hex(to_ip) AS to_ip_hex,'
'(SELECT name FROM symbols WHERE id = to_symbol_id) AS to_symbol,'
'to_sym_offset,'
'(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,'
'(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,'
'in_tx'
' FROM samples')
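# Illustrative query against the view above (results depend entirely on the
# exported data):
#
#   SELECT time, command, symbol, dso_short_name FROM samples_view LIMIT 10;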
file_header = struct.pack("!11sii", "PGCOPY\n\377\r\n\0", 0, 0)
file_trailer = "\377\377"
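# The values above are PostgreSQL's binary COPY framing: an 11-byte
# signature "PGCOPY\n\377\r\n\0" followed by a 32-bit flags field and a
# 32-bit header-extension length (both zero); the trailer is the 16-bit
# tuple-count terminator -1 (0xffff).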
def open_output_file(file_name):
path_name = output_dir_name + "/" + file_name
file = open(path_name, "w+")
file.write(file_header)
return file
def close_output_file(file):
file.write(file_trailer)
file.close()
def copy_output_file_direct(file, table_name):
close_output_file(file)
sql = "COPY " + table_name + " FROM '" + file.name + "' (FORMAT 'binary')"
do_query(query, sql)
# Use COPY FROM STDIN because security may prevent postgres from accessing the files directly
def copy_output_file(file, table_name):
conn = PQconnectdb("dbname = " + dbname)
if (PQstatus(conn)):
raise Exception("COPY FROM STDIN PQconnectdb failed")
file.write(file_trailer)
file.seek(0)
sql = "COPY " + table_name + " FROM STDIN (FORMAT 'binary')"
res = PQexec(conn, sql)
	if (PQresultStatus(res) != 4):  # 4 == PGRES_COPY_IN
raise Exception("COPY FROM STDIN PQexec failed")
data = file.read(65536)
while (len(data)):
ret = PQputCopyData(conn, data, len(data))
if (ret != 1):
raise Exception("COPY FROM STDIN PQputCopyData failed, error " + str(ret))
data = file.read(65536)
ret = PQputCopyEnd(conn, None)
if (ret != 1):
raise Exception("COPY FROM STDIN PQputCopyEnd failed, error " + str(ret))
PQfinish(conn)
def remove_output_file(file):
name = file.name
file.close()
os.unlink(name)
evsel_file = open_output_file("evsel_table.bin")
machine_file = open_output_file("machine_table.bin")
thread_file = open_output_file("thread_table.bin")
comm_file = open_output_file("comm_table.bin")
comm_thread_file = open_output_file("comm_thread_table.bin")
dso_file = open_output_file("dso_table.bin")
symbol_file = open_output_file("symbol_table.bin")
branch_type_file = open_output_file("branch_type_table.bin")
sample_file = open_output_file("sample_table.bin")
if perf_db_export_calls:
call_path_file = open_output_file("call_path_table.bin")
call_file = open_output_file("call_table.bin")
def trace_begin():
print datetime.datetime.today(), "Writing to intermediate files..."
# id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
evsel_table(0, "unknown")
machine_table(0, 0, "unknown")
thread_table(0, 0, 0, -1, -1)
comm_table(0, "unknown")
dso_table(0, 0, "unknown", "unknown", "")
symbol_table(0, 0, 0, 0, 0, "unknown")
sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
if perf_db_export_calls:
call_path_table(0, 0, 0, 0)
unhandled_count = 0
def trace_end():
print datetime.datetime.today(), "Copying to database..."
copy_output_file(evsel_file, "selected_events")
copy_output_file(machine_file, "machines")
copy_output_file(thread_file, "threads")
copy_output_file(comm_file, "comms")
copy_output_file(comm_thread_file, "comm_threads")
copy_output_file(dso_file, "dsos")
copy_output_file(symbol_file, "symbols")
copy_output_file(branch_type_file, "branch_types")
copy_output_file(sample_file, "samples")
if perf_db_export_calls:
copy_output_file(call_path_file, "call_paths")
copy_output_file(call_file, "calls")
print datetime.datetime.today(), "Removing intermediate files..."
remove_output_file(evsel_file)
remove_output_file(machine_file)
remove_output_file(thread_file)
remove_output_file(comm_file)
remove_output_file(comm_thread_file)
remove_output_file(dso_file)
remove_output_file(symbol_file)
remove_output_file(branch_type_file)
remove_output_file(sample_file)
if perf_db_export_calls:
remove_output_file(call_path_file)
remove_output_file(call_file)
os.rmdir(output_dir_name)
print datetime.datetime.today(), "Adding primary keys"
do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE machines ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE threads ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE comms ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE comm_threads ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE dsos ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE symbols ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE branch_types ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE samples ADD PRIMARY KEY (id)')
if perf_db_export_calls:
do_query(query, 'ALTER TABLE call_paths ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)')
print datetime.datetime.today(), "Adding foreign keys"
do_query(query, 'ALTER TABLE threads '
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
'ADD CONSTRAINT processfk FOREIGN KEY (process_id) REFERENCES threads (id)')
do_query(query, 'ALTER TABLE comm_threads '
'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id)')
do_query(query, 'ALTER TABLE dsos '
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id)')
do_query(query, 'ALTER TABLE symbols '
'ADD CONSTRAINT dsofk FOREIGN KEY (dso_id) REFERENCES dsos (id)')
do_query(query, 'ALTER TABLE samples '
'ADD CONSTRAINT evselfk FOREIGN KEY (evsel_id) REFERENCES selected_events (id),'
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),'
'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
'ADD CONSTRAINT dsofk FOREIGN KEY (dso_id) REFERENCES dsos (id),'
'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id),'
'ADD CONSTRAINT todsofk FOREIGN KEY (to_dso_id) REFERENCES dsos (id),'
'ADD CONSTRAINT tosymbolfk FOREIGN KEY (to_symbol_id) REFERENCES symbols (id)')
if perf_db_export_calls:
do_query(query, 'ALTER TABLE call_paths '
'ADD CONSTRAINT parentfk FOREIGN KEY (parent_id) REFERENCES call_paths (id),'
'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id)')
do_query(query, 'ALTER TABLE calls '
'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),'
'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
'ADD CONSTRAINT call_pathfk FOREIGN KEY (call_path_id) REFERENCES call_paths (id),'
'ADD CONSTRAINT callfk FOREIGN KEY (call_id) REFERENCES samples (id),'
'ADD CONSTRAINT returnfk FOREIGN KEY (return_id) REFERENCES samples (id),'
'ADD CONSTRAINT parent_call_pathfk FOREIGN KEY (parent_call_path_id) REFERENCES call_paths (id)')
do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
if (unhandled_count):
print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
print datetime.datetime.today(), "Done"
def trace_unhandled(event_name, context, event_fields_dict):
global unhandled_count
unhandled_count += 1
def sched__sched_switch(*x):
pass
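# Each row written by the *_table callbacks below uses the PGCOPY tuple
# layout: a big-endian int16 field count, then for every field a 32-bit
# byte length followed by the raw value bytes - hence the "!h" prefix
# and the repeated "i<value>" pairs in the struct format strings.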
def evsel_table(evsel_id, evsel_name, *x):
n = len(evsel_name)
fmt = "!hiqi" + str(n) + "s"
value = struct.pack(fmt, 2, 8, evsel_id, n, evsel_name)
evsel_file.write(value)
def machine_table(machine_id, pid, root_dir, *x):
n = len(root_dir)
fmt = "!hiqiii" + str(n) + "s"
value = struct.pack(fmt, 3, 8, machine_id, 4, pid, n, root_dir)
machine_file.write(value)
def thread_table(thread_id, machine_id, process_id, pid, tid, *x):
value = struct.pack("!hiqiqiqiiii", 5, 8, thread_id, 8, machine_id, 8, process_id, 4, pid, 4, tid)
thread_file.write(value)
def comm_table(comm_id, comm_str, *x):
n = len(comm_str)
fmt = "!hiqi" + str(n) + "s"
value = struct.pack(fmt, 2, 8, comm_id, n, comm_str)
comm_file.write(value)
def comm_thread_table(comm_thread_id, comm_id, thread_id, *x):
fmt = "!hiqiqiq"
value = struct.pack(fmt, 3, 8, comm_thread_id, 8, comm_id, 8, thread_id)
comm_thread_file.write(value)
def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
n1 = len(short_name)
n2 = len(long_name)
n3 = len(build_id)
fmt = "!hiqiqi" + str(n1) + "si" + str(n2) + "si" + str(n3) + "s"
value = struct.pack(fmt, 5, 8, dso_id, 8, machine_id, n1, short_name, n2, long_name, n3, build_id)
dso_file.write(value)
def symbol_table(symbol_id, dso_id, sym_start, sym_end, binding, symbol_name, *x):
n = len(symbol_name)
fmt = "!hiqiqiqiqiii" + str(n) + "s"
value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8, sym_start, 8, sym_end, 4, binding, n, symbol_name)
symbol_file.write(value)
def branch_type_table(branch_type, name, *x):
n = len(name)
fmt = "!hiii" + str(n) + "s"
value = struct.pack(fmt, 2, 4, branch_type, n, name)
branch_type_file.write(value)
def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, time, cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, branch_type, in_tx, *x):
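# The leading field count (17 with branches, 21 without) must match the
# column list of the samples table created earlier.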
if branches:
value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiiiB", 17, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 4, branch_type, 1, in_tx)
else:
value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiqiqiqiqiiiB", 21, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 8, period, 8, weight, 8, transaction, 8, data_src, 4, branch_type, 1, in_tx)
sample_file.write(value)
def call_path_table(cp_id, parent_id, symbol_id, ip, *x):
fmt = "!hiqiqiqiq"
value = struct.pack(fmt, 4, 8, cp_id, 8, parent_id, 8, symbol_id, 8, ip)
call_path_file.write(value)
def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, *x):
fmt = "!hiqiqiqiqiqiqiqiqiqiqii"
value = struct.pack(fmt, 11, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags)
call_file.write(value)
| gpl-2.0 |
osvalr/odoo | addons/account_budget/account_budget.py | 194 | 9368 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import date, datetime
from openerp.osv import fields, osv
from openerp.tools import ustr, DEFAULT_SERVER_DATE_FORMAT
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
# ---------------------------------------------------------
# Utils
# ---------------------------------------------------------
def strToDate(dt):
return date(int(dt[0:4]), int(dt[5:7]), int(dt[8:10]))
def strToDatetime(strdate):
return datetime.strptime(strdate, DEFAULT_SERVER_DATE_FORMAT)
# ---------------------------------------------------------
# Budgets
# ---------------------------------------------------------
class account_budget_post(osv.osv):
_name = "account.budget.post"
_description = "Budgetary Position"
_columns = {
'code': fields.char('Code', size=64, required=True),
'name': fields.char('Name', required=True),
'account_ids': fields.many2many('account.account', 'account_budget_rel', 'budget_id', 'account_id', 'Accounts'),
'crossovered_budget_line': fields.one2many('crossovered.budget.lines', 'general_budget_id', 'Budget Lines'),
'company_id': fields.many2one('res.company', 'Company', required=True),
}
_defaults = {
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.budget.post', context=c)
}
_order = "name"
class crossovered_budget(osv.osv):
_name = "crossovered.budget"
_description = "Budget"
_columns = {
'name': fields.char('Name', required=True, states={'done':[('readonly',True)]}),
'code': fields.char('Code', size=16, required=True, states={'done':[('readonly',True)]}),
'creating_user_id': fields.many2one('res.users', 'Responsible User'),
'validating_user_id': fields.many2one('res.users', 'Validate User', readonly=True),
'date_from': fields.date('Start Date', required=True, states={'done':[('readonly',True)]}),
'date_to': fields.date('End Date', required=True, states={'done':[('readonly',True)]}),
'state' : fields.selection([('draft','Draft'),('cancel', 'Cancelled'),('confirm','Confirmed'),('validate','Validated'),('done','Done')], 'Status', select=True, required=True, readonly=True, copy=False),
'crossovered_budget_line': fields.one2many('crossovered.budget.lines', 'crossovered_budget_id', 'Budget Lines', states={'done':[('readonly',True)]}, copy=True),
'company_id': fields.many2one('res.company', 'Company', required=True),
}
_defaults = {
'state': 'draft',
'creating_user_id': lambda self, cr, uid, context: uid,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.budget.post', context=c)
}
def budget_confirm(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {
'state': 'confirm'
})
return True
def budget_draft(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {
'state': 'draft'
})
return True
def budget_validate(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {
'state': 'validate',
'validating_user_id': uid,
})
return True
def budget_cancel(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {
'state': 'cancel'
})
return True
def budget_done(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {
'state': 'done'
})
return True
class crossovered_budget_lines(osv.osv):
def _prac_amt(self, cr, uid, ids, context=None):
res = {}
result = 0.0
if context is None:
context = {}
account_obj = self.pool.get('account.account')
for line in self.browse(cr, uid, ids, context=context):
acc_ids = [x.id for x in line.general_budget_id.account_ids]
if not acc_ids:
raise osv.except_osv(_('Error!'),_("The Budget '%s' has no accounts!") % ustr(line.general_budget_id.name))
acc_ids = account_obj._get_children_and_consol(cr, uid, acc_ids, context=context)
date_to = line.date_to
date_from = line.date_from
if line.analytic_account_id.id:
cr.execute("SELECT SUM(amount) FROM account_analytic_line WHERE account_id=%s AND (date "
"between to_date(%s,'yyyy-mm-dd') AND to_date(%s,'yyyy-mm-dd')) AND "
"general_account_id=ANY(%s)", (line.analytic_account_id.id, date_from, date_to,acc_ids,))
result = cr.fetchone()[0]
if result is None:
result = 0.00
res[line.id] = result
return res
def _prac(self, cr, uid, ids, name, args, context=None):
res={}
for line in self.browse(cr, uid, ids, context=context):
res[line.id] = self._prac_amt(cr, uid, [line.id], context=context)[line.id]
return res
def _theo_amt(self, cr, uid, ids, context=None):
if context is None:
context = {}
res = {}
for line in self.browse(cr, uid, ids, context=context):
today = datetime.now()
if line.paid_date:
if strToDate(line.date_to) <= strToDate(line.paid_date):
theo_amt = 0.00
else:
theo_amt = line.planned_amount
else:
line_timedelta = strToDatetime(line.date_to) - strToDatetime(line.date_from)
elapsed_timedelta = today - (strToDatetime(line.date_from))
if elapsed_timedelta.days < 0:
# If the budget line has not started yet, theoretical amount should be zero
theo_amt = 0.00
elif line_timedelta.days > 0 and today < strToDatetime(line.date_to):
# If today is between the budget line date_from and date_to
theo_amt = (elapsed_timedelta.total_seconds() / line_timedelta.total_seconds()) * line.planned_amount
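# e.g. a line planned at 100.0 over 10 days with 4 days elapsed
# yields a theoretical amount of 0.4 * 100.0 = 40.0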
else:
theo_amt = line.planned_amount
res[line.id] = theo_amt
return res
def _theo(self, cr, uid, ids, name, args, context=None):
res = {}
for line in self.browse(cr, uid, ids, context=context):
res[line.id] = self._theo_amt(cr, uid, [line.id], context=context)[line.id]
return res
def _perc(self, cr, uid, ids, name, args, context=None):
res = {}
for line in self.browse(cr, uid, ids, context=context):
if line.theoritical_amount != 0.00:
res[line.id] = float((line.practical_amount or 0.0) / line.theoritical_amount) * 100
else:
res[line.id] = 0.00
return res
_name = "crossovered.budget.lines"
_description = "Budget Line"
_columns = {
'crossovered_budget_id': fields.many2one('crossovered.budget', 'Budget', ondelete='cascade', select=True, required=True),
'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account'),
'general_budget_id': fields.many2one('account.budget.post', 'Budgetary Position',required=True),
'date_from': fields.date('Start Date', required=True),
'date_to': fields.date('End Date', required=True),
'paid_date': fields.date('Paid Date'),
'planned_amount':fields.float('Planned Amount', required=True, digits_compute=dp.get_precision('Account')),
'practical_amount':fields.function(_prac, string='Practical Amount', type='float', digits_compute=dp.get_precision('Account')),
'theoritical_amount':fields.function(_theo, string='Theoretical Amount', type='float', digits_compute=dp.get_precision('Account')),
'percentage':fields.function(_perc, string='Percentage', type='float'),
'company_id': fields.related('crossovered_budget_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True)
}
class account_analytic_account(osv.osv):
_inherit = "account.analytic.account"
_columns = {
'crossovered_budget_line': fields.one2many('crossovered.budget.lines', 'analytic_account_id', 'Budget Lines'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ndawe/pyAMI | pyAMI/extern/ZSI/generate/commands.py | 1 | 13219 | ############################################################################
# Joshua Boverhof<JRBoverhof@lbl.gov>, LBNL
# Monte Goode <MMGoode@lbl.gov>, LBNL
# See Copyright for copyright notice!
############################################################################
import exceptions, sys, optparse, os, warnings
from operator import xor
import ZSI
from ConfigParser import ConfigParser
from ZSI.generate.wsdl2python import WriteServiceModule, ServiceDescription
from ZSI.wstools import WSDLTools, XMLSchema
from ZSI.wstools.logging import setBasicLoggerDEBUG
from ZSI.generate import containers, utility
from ZSI.generate.utility import NCName_to_ClassName as NC_to_CN, TextProtect
from ZSI.generate.wsdl2dispatch import ServiceModuleWriter as ServiceDescription
from ZSI.generate.wsdl2dispatch import DelAuthServiceModuleWriter as DelAuthServiceDescription
from ZSI.generate.wsdl2dispatch import WSAServiceModuleWriter as ServiceDescriptionWSA
from ZSI.generate.wsdl2dispatch import DelAuthWSAServiceModuleWriter as DelAuthServiceDescriptionWSA
warnings.filterwarnings('ignore', '', exceptions.UserWarning)
def SetDebugCallback(option, opt, value, parser, *args, **kwargs):
setBasicLoggerDEBUG()
warnings.resetwarnings()
def SetPyclassMetaclass(option, opt, value, parser, *args, **kwargs):
"""set up pyclass metaclass for complexTypes"""
from ZSI.generate.containers import ServiceHeaderContainer, TypecodeContainerBase, TypesHeaderContainer
TypecodeContainerBase.metaclass = kwargs['metaclass']
TypesHeaderContainer.imports.append(\
'from %(module)s import %(metaclass)s' %kwargs
)
ServiceHeaderContainer.imports.append(\
'from %(module)s import %(metaclass)s' %kwargs
)
def SetUpTwistedClient(option, opt, value, parser, *args, **kwargs):
from ZSI.generate.containers import ServiceHeaderContainer
ServiceHeaderContainer.imports.remove('from ZSI import client')
ServiceHeaderContainer.imports.append('from ZSI.twisted import client')
def SetUpLazyEvaluation(option, opt, value, parser, *args, **kwargs):
from ZSI.generate.containers import TypecodeContainerBase
TypecodeContainerBase.lazy = True
def formatSchemaObject(fname, schemaObj):
""" In the case of a 'schema only' generation (-s) this creates
a fake wsdl object that will function w/in the adapters
and allow the generator to do what it needs to do.
"""
class fake:
pass
f = fake()
# rfind returns -1 when there is no '/', and fname[-1 + 1:] is the
# whole string, so a single slice handles both cases.
tmp = fname[fname.rfind('/') + 1:].split('.')
f.name = tmp[0] + '_' + tmp[1]
f.types = { schemaObj.targetNamespace : schemaObj }
return f
def wsdl2py(args=None):
"""
A utility for automatically generating client interface code from a wsdl
definition, and a set of classes representing element declarations and
type definitions. This will produce two files in the current working
directory named after the wsdl definition name.
eg. <definition name='SampleService'>
SampleService.py
SampleService_types.py
"""
op = optparse.OptionParser(usage="usage: %prog [options]",
description=wsdl2py.__doc__)
# Basic options
op.add_option("-f", "--file",
action="store", dest="file", default=None, type="string",
help="FILE to load wsdl from")
op.add_option("-u", "--url",
action="store", dest="url", default=None, type="string",
help="URL to load wsdl from")
op.add_option("-x", "--schema",
action="store_true", dest="schema", default=False,
help="process just the schema from an xsd file [no services]")
op.add_option("-d", "--debug",
action="callback", callback=SetDebugCallback,
help="debug output")
# WS Options
op.add_option("-a", "--address",
action="store_true", dest="address", default=False,
help="ws-addressing support, must include WS-Addressing schema.")
# pyclass Metaclass
op.add_option("-b", "--complexType",
action="callback", callback=SetPyclassMetaclass,
callback_kwargs={'module':'ZSI.generate.pyclass',
'metaclass':'pyclass_type'},
help="add convenience functions for complexTypes, including Getters, Setters, factory methods, and properties (via metaclass). *** DONT USE WITH --simple-naming ***")
# Lazy Evaluation of Typecodes (done at serialization/parsing when needed).
op.add_option("-l", "--lazy",
action="callback", callback=SetUpLazyEvaluation,
callback_kwargs={},
help="EXPERIMENTAL: recursion error solution, lazy evalution of typecodes")
# Use Twisted
op.add_option("-w", "--twisted",
action="callback", callback=SetUpTwistedClient,
callback_kwargs={'module':'ZSI.generate.pyclass',
'metaclass':'pyclass_type'},
help="generate a twisted.web client, dependencies python>=2.4, Twisted>=2.0.0, TwistedWeb>=0.5.0")
# Extended generation options
op.add_option("-e", "--extended",
action="store_true", dest="extended", default=False,
help="Do Extended code generation.")
op.add_option("-z", "--aname",
action="store", dest="aname", default=None, type="string",
help="pass in a function for attribute name creation")
op.add_option("-t", "--types",
action="store", dest="types", default=None, type="string",
help="file to load types from")
op.add_option("-o", "--output-dir",
action="store", dest="output_dir", default=".", type="string",
help="Write generated files to OUTPUT_DIR")
op.add_option("-s", "--simple-naming",
action="store_true", dest="simple_naming", default=False,
help="Simplify generated naming.")
op.add_option("-c", "--clientClassSuffix",
action="store", dest="clientClassSuffix", default=None, type="string",
help="Suffix to use for service client class (default \"SOAP\")")
op.add_option("-m", "--pyclassMapModule",
action="store", dest="pyclassMapModule", default=None, type="string",
help="Python file that maps external python classes to a schema type. The classes are used as the \"pyclass\" for that type. The module should contain a dict() called mapping in the format: mapping = {schemaTypeName:(moduleName.py,className) }")
if args is None:
(options, args) = op.parse_args()
else:
(options, args) = op.parse_args(args)
if not xor(options.file is None, options.url is None):
print 'Must specify either --file or --url option'
sys.exit(os.EX_USAGE)
location = options.file
if options.url is not None:
location = options.url
if options.schema is True:
reader = XMLSchema.SchemaReader(base_url=location)
else:
reader = WSDLTools.WSDLReader()
load = reader.loadFromFile
if options.url is not None:
load = reader.loadFromURL
wsdl = None
try:
wsdl = load(location)
except Exception, e:
print "Error loading %s: \n\t%s" % (location, e)
# exit code UNIX specific, Windows?
sys.exit(os.EX_NOINPUT)
if options.simple_naming:
# Use a different client suffix
WriteServiceModule.client_module_suffix = "_client"
# Write messages definitions to a separate file.
ServiceDescription.separate_messages = True
# Use more simple type and element class names
containers.SetTypeNameFunc( lambda n: '%s_' %(NC_to_CN(n)) )
containers.SetElementNameFunc( lambda n: '%s' %(NC_to_CN(n)) )
# Don't add "_" to the attribute name (remove when --aname works well)
containers.ContainerBase.func_aname = lambda instnc,n: TextProtect(str(n))
# write out the modules with their names rather than their number.
utility.namespace_name = lambda cls, ns: utility.Namespace2ModuleName(ns)
if options.clientClassSuffix:
from ZSI.generate.containers import ServiceContainerBase
ServiceContainerBase.clientClassSuffix = options.clientClassSuffix
if options.schema is True:
wsdl = formatSchemaObject(location, wsdl)
if options.aname is not None:
args = options.aname.rsplit('.',1)
assert len(args) == 2, 'expecting module.function'
# Import the attribute-name function dynamically; the exec-based
# 'from %s import %s as FUNC' this replaces was a syntax error here.
mod = __import__(args[0], globals(), locals(), [args[1]])
FUNC = getattr(mod, args[1])
assert callable(FUNC),\
'%s must be a callable method with one string parameter' %options.aname
from ZSI.generate.containers import TypecodeContainerBase
TypecodeContainerBase.func_aname = staticmethod(FUNC)
if options.pyclassMapModule != None:
mod = __import__(options.pyclassMapModule)
components = options.pyclassMapModule.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
extPyClasses = mod.mapping
else:
extPyClasses = None
wsm = WriteServiceModule(wsdl, addressing=options.address, do_extended=options.extended, extPyClasses=extPyClasses)
if options.types != None:
wsm.setTypesModuleName(options.types)
if options.schema is False:
fd = open(os.path.join(options.output_dir, '%s.py' %wsm.getClientModuleName()), 'w+')
# simple naming writes the messages to a separate file
if not options.simple_naming:
wsm.writeClient(fd)
else: # provide a separate file to store messages to.
msg_fd = open(os.path.join(options.output_dir, '%s.py' %wsm.getMessagesModuleName()), 'w+')
wsm.writeClient(fd, msg_fd=msg_fd)
msg_fd.close()
fd.close()
fd = open( os.path.join(options.output_dir, '%s.py' %wsm.getTypesModuleName()), 'w+')
wsm.writeTypes(fd)
fd.close()
def wsdl2dispatch(args=None):
"""
wsdl2dispatch
A utility for automatically generating service skeleton code from a wsdl
definition.
"""
op = optparse.OptionParser()
op.add_option("-f", "--file",
action="store", dest="file", default=None, type="string",
help="file to load wsdl from")
op.add_option("-u", "--url",
action="store", dest="url", default=None, type="string",
help="URL to load wsdl from")
op.add_option("-a", "--address",
action="store_true", dest="address", default=False,
help="ws-addressing support, must include WS-Addressing schema.")
op.add_option("-e", "--extended",
action="store_true", dest="extended", default=False,
help="Extended code generation.")
op.add_option("-d", "--debug",
action="callback", callback=SetDebugCallback,
help="debug output")
op.add_option("-t", "--types",
action="store", dest="types", default=None, type="string",
help="Write generated files to OUTPUT_DIR")
op.add_option("-o", "--output-dir",
action="store", dest="output_dir", default=".", type="string",
help="file to load types from")
op.add_option("-s", "--simple-naming",
action="store_true", dest="simple_naming", default=False,
help="Simplify generated naming.")
if args is None:
(options, args) = op.parse_args()
else:
(options, args) = op.parse_args(args)
if options.simple_naming:
ServiceDescription.server_module_suffix = '_interface'
ServiceDescription.func_aname = lambda instnc,n: TextProtect(n)
ServiceDescription.separate_messages = True
# use module names rather than their number.
utility.namespace_name = lambda cls, ns: utility.Namespace2ModuleName(ns)
reader = WSDLTools.WSDLReader()
wsdl = None
if options.file is not None:
wsdl = reader.loadFromFile(options.file)
elif options.url is not None:
wsdl = reader.loadFromURL(options.url)
assert wsdl is not None, 'Must specify WSDL either with --file or --url'
ss = None
if options.address is True:
if options.extended:
ss = DelAuthServiceDescriptionWSA(do_extended=options.extended)
else:
ss = ServiceDescriptionWSA(do_extended=options.extended)
else:
if options.extended:
ss = DelAuthServiceDescription(do_extended=options.extended)
else:
ss = ServiceDescription(do_extended=options.extended)
ss.fromWSDL(wsdl)
fd = open( os.path.join(options.output_dir, ss.getServiceModuleName()+'.py'), 'w+')
ss.write(fd)
fd.close()
| gpl-3.0 |
webgeodatavore/django | tests/gis_tests/geoapp/test_feeds.py | 292 | 4194 | from __future__ import unicode_literals
from xml.dom import minidom
from django.conf import settings
from django.contrib.sites.models import Site
from django.test import (
TestCase, modify_settings, override_settings, skipUnlessDBFeature,
)
from .models import City
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.sites'})
@override_settings(ROOT_URLCONF='gis_tests.geoapp.urls')
@skipUnlessDBFeature("gis_enabled")
class GeoFeedTest(TestCase):
fixtures = ['initial']
def setUp(self):
Site(id=settings.SITE_ID, domain="example.com", name="example.com").save()
def assertChildNodes(self, elem, expected):
"Taken from syndication/tests.py."
actual = set(n.nodeName for n in elem.childNodes)
expected = set(expected)
self.assertEqual(actual, expected)
def test_geofeed_rss(self):
"Tests geographic feeds using GeoRSS over RSSv2."
# Uses `GEOSGeometry` in `item_geometry`
doc1 = minidom.parseString(self.client.get('/feeds/rss1/').content)
# Uses a 2-tuple in `item_geometry`
doc2 = minidom.parseString(self.client.get('/feeds/rss2/').content)
feed1, feed2 = doc1.firstChild, doc2.firstChild
# Making sure the box got added to the second GeoRSS feed.
self.assertChildNodes(feed2.getElementsByTagName('channel')[0],
['title', 'link', 'description', 'language',
'lastBuildDate', 'item', 'georss:box', 'atom:link']
)
# Incrementing through the feeds.
for feed in [feed1, feed2]:
# Ensuring the georss namespace was added to the <rss> element.
self.assertEqual(feed.getAttribute('xmlns:georss'), 'http://www.georss.org/georss')
chan = feed.getElementsByTagName('channel')[0]
items = chan.getElementsByTagName('item')
self.assertEqual(len(items), City.objects.count())
# Ensuring the georss element was added to each item in the feed.
for item in items:
self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'georss:point'])
def test_geofeed_atom(self):
"Testing geographic feeds using GeoRSS over Atom."
doc1 = minidom.parseString(self.client.get('/feeds/atom1/').content)
doc2 = minidom.parseString(self.client.get('/feeds/atom2/').content)
feed1, feed2 = doc1.firstChild, doc2.firstChild
# Making sure the box got added to the second GeoRSS feed.
self.assertChildNodes(feed2, ['title', 'link', 'id', 'updated', 'entry', 'georss:box'])
for feed in [feed1, feed2]:
# Ensuring the georss namespace was added to the <feed> element.
self.assertEqual(feed.getAttribute('xmlns:georss'), 'http://www.georss.org/georss')
entries = feed.getElementsByTagName('entry')
self.assertEqual(len(entries), City.objects.count())
# Ensuring the georss element was added to each entry in the feed.
for entry in entries:
self.assertChildNodes(entry, ['title', 'link', 'id', 'summary', 'georss:point'])
def test_geofeed_w3c(self):
"Testing geographic feeds using W3C Geo."
doc = minidom.parseString(self.client.get('/feeds/w3cgeo1/').content)
feed = doc.firstChild
# Ensuring the geo namespace was added to the <feed> element.
self.assertEqual(feed.getAttribute('xmlns:geo'), 'http://www.w3.org/2003/01/geo/wgs84_pos#')
chan = feed.getElementsByTagName('channel')[0]
items = chan.getElementsByTagName('item')
self.assertEqual(len(items), City.objects.count())
# Ensuring the geo:lat and geo:lon element was added to each item in the feed.
for item in items:
self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'geo:lat', 'geo:lon'])
# Boxes and Polygons aren't allowed in W3C Geo feeds.
self.assertRaises(ValueError, self.client.get, '/feeds/w3cgeo2/') # Box in <channel>
self.assertRaises(ValueError, self.client.get, '/feeds/w3cgeo3/') # Polygons in <entry>
| bsd-3-clause |
matthewalbani/scipy | scipy/stats/_continuous_distns.py | 7 | 140183 | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy.misc.doccer import inherit_docstring_from
from scipy import optimize
from scipy import integrate
import scipy.special as sc
from scipy._lib._numpy_compat import broadcast_to
from . import _stats
from ._tukeylambda_stats import (tukeylambda_variance as _tlvar,
tukeylambda_kurtosis as _tlkurt)
from ._distn_infrastructure import (get_distribution_names, _kurtosis,
_lazyselect, _lazywhere, _ncx2_cdf,
_ncx2_log_pdf, _ncx2_pdf,
rv_continuous, _skew, valarray)
from ._constants import _XMIN, _EULER, _ZETA3, _XMAX, _LOGXMAX
## Kolmogorov-Smirnov one-sided and two-sided test statistics
class ksone_gen(rv_continuous):
"""General Kolmogorov-Smirnov one-sided test.
%(default)s
"""
def _cdf(self, x, n):
return 1.0 - sc.smirnov(n, x)
def _ppf(self, q, n):
return sc.smirnovi(n, 1.0 - q)
ksone = ksone_gen(a=0.0, name='ksone')
class kstwobign_gen(rv_continuous):
"""Kolmogorov-Smirnov two-sided test for large N.
%(default)s
"""
def _cdf(self, x):
return 1.0 - sc.kolmogorov(x)
def _sf(self, x):
return sc.kolmogorov(x)
def _ppf(self, q):
return sc.kolmogi(1.0 - q)
kstwobign = kstwobign_gen(a=0.0, name='kstwobign')
## Normal distribution
# loc = mu, scale = std
# Keep these implementations out of the class definition so they can be reused
# by other distributions.
_norm_pdf_C = np.sqrt(2*np.pi)
_norm_pdf_logC = np.log(_norm_pdf_C)
def _norm_pdf(x):
return np.exp(-x**2/2.0) / _norm_pdf_C
def _norm_logpdf(x):
return -x**2 / 2.0 - _norm_pdf_logC
def _norm_cdf(x):
return sc.ndtr(x)
def _norm_logcdf(x):
return sc.log_ndtr(x)
def _norm_ppf(q):
return sc.ndtri(q)
def _norm_sf(x):
return _norm_cdf(-x)
def _norm_logsf(x):
return _norm_logcdf(-x)
def _norm_isf(q):
return -_norm_ppf(q)
class norm_gen(rv_continuous):
"""A normal continuous random variable.
The location (loc) keyword specifies the mean.
The scale (scale) keyword specifies the standard deviation.
%(before_notes)s
Notes
-----
The probability density function for `norm` is::
norm.pdf(x) = exp(-x**2/2)/sqrt(2*pi)
The survival function, ``norm.sf``, is also referred to as the
Q-function in some contexts (see, e.g.,
`Wikipedia's <https://en.wikipedia.org/wiki/Q-function>`_ definition).
%(after_notes)s
%(example)s
"""
def _rvs(self):
return self._random_state.standard_normal(self._size)
def _pdf(self, x):
return _norm_pdf(x)
def _logpdf(self, x):
return _norm_logpdf(x)
def _cdf(self, x):
return _norm_cdf(x)
def _logcdf(self, x):
return _norm_logcdf(x)
def _sf(self, x):
return _norm_sf(x)
def _logsf(self, x):
return _norm_logsf(x)
def _ppf(self, q):
return _norm_ppf(q)
def _isf(self, q):
return _norm_isf(q)
def _stats(self):
return 0.0, 1.0, 0.0, 0.0
def _entropy(self):
return 0.5*(np.log(2*np.pi)+1)
@inherit_docstring_from(rv_continuous)
def fit(self, data, **kwds):
"""%(super)s
This function (norm_gen.fit) uses explicit formulas for the maximum
likelihood estimation of the parameters, so the `optimizer` argument
is ignored.
"""
floc = kwds.get('floc', None)
fscale = kwds.get('fscale', None)
if floc is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
# Without this check, this function would just return the
# parameters that were given.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
data = np.asarray(data)
if floc is None:
loc = data.mean()
else:
loc = floc
if fscale is None:
scale = np.sqrt(((data - loc)**2).mean())
else:
scale = fscale
return loc, scale
norm = norm_gen(name='norm')
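# A quick sanity check of the closed-form fit (hypothetical session):
# >>> data = np.array([1.0, 2.0, 3.0, 4.0])
# >>> norm.fit(data)
# (2.5, 1.118033988749895)
# i.e. the MLE loc is the sample mean and the MLE scale is the biased
# (ddof=0) standard deviation, matching data.mean() and data.std().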
class alpha_gen(rv_continuous):
"""An alpha continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `alpha` is::
alpha.pdf(x, a) = 1/(x**2*Phi(a)*sqrt(2*pi)) * exp(-1/2 * (a-1/x)**2),
where ``Phi(alpha)`` is the normal CDF, ``x > 0``, and ``a > 0``.
`alpha` takes ``a`` as a shape parameter.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, a):
return 1.0/(x**2)/_norm_cdf(a)*_norm_pdf(a-1.0/x)
def _logpdf(self, x, a):
return -2*np.log(x) + _norm_logpdf(a-1.0/x) - np.log(_norm_cdf(a))
def _cdf(self, x, a):
return _norm_cdf(a-1.0/x) / _norm_cdf(a)
def _ppf(self, q, a):
return 1.0/np.asarray(a-sc.ndtri(q*_norm_cdf(a)))
def _stats(self, a):
return [np.inf]*2 + [np.nan]*2
alpha = alpha_gen(a=0.0, name='alpha')
class anglit_gen(rv_continuous):
"""An anglit continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `anglit` is::
anglit.pdf(x) = sin(2*x + pi/2) = cos(2*x),
for ``-pi/4 <= x <= pi/4``.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
return np.cos(2*x)
def _cdf(self, x):
return np.sin(x+np.pi/4)**2.0
def _ppf(self, q):
return np.arcsin(np.sqrt(q))-np.pi/4
def _stats(self):
return 0.0, np.pi*np.pi/16-0.5, 0.0, -2*(np.pi**4 - 96)/(np.pi*np.pi-8)**2
def _entropy(self):
return 1-np.log(2)
anglit = anglit_gen(a=-np.pi/4, b=np.pi/4, name='anglit')
class arcsine_gen(rv_continuous):
"""An arcsine continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `arcsine` is::
arcsine.pdf(x) = 1/(pi*sqrt(x*(1-x)))
for ``0 < x < 1``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x):
return 1.0/np.pi/np.sqrt(x*(1-x))
def _cdf(self, x):
return 2.0/np.pi*np.arcsin(np.sqrt(x))
def _ppf(self, q):
return np.sin(np.pi/2.0*q)**2.0
def _stats(self):
mu = 0.5
mu2 = 1.0/8
g1 = 0
g2 = -3.0/2.0
return mu, mu2, g1, g2
def _entropy(self):
return -0.24156447527049044468
arcsine = arcsine_gen(a=0.0, b=1.0, name='arcsine')
class FitDataError(ValueError):
# This exception is raised by, for example, beta_gen.fit when both floc
# and fscale are fixed and there are values in the data not in the open
# interval (floc, floc+fscale).
def __init__(self, distr, lower, upper):
self.args = (
"Invalid values in `data`. Maximum likelihood "
"estimation with {distr!r} requires that {lower!r} < x "
"< {upper!r} for each x in `data`.".format(
distr=distr, lower=lower, upper=upper),
)
class FitSolverError(RuntimeError):
# This exception is raised by, for example, beta_gen.fit when
# optimize.fsolve returns with ier != 1.
def __init__(self, mesg):
emsg = "Solver for the MLE equations failed to converge: "
emsg += mesg.replace('\n', '')
self.args = (emsg,)
def _beta_mle_a(a, b, n, s1):
# The zeros of this function give the MLE for `a`, with
# `b`, `n` and `s1` given. `s1` is the sum of the logs of
# the data. `n` is the number of data points.
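# Setting d(log L)/da = 0 for Beta(a, b) gives s1/n = psi(a) - psi(a+b),
# which is exactly func == 0 below.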
psiab = sc.psi(a + b)
func = s1 - n * (-psiab + sc.psi(a))
return func
def _beta_mle_ab(theta, n, s1, s2):
# Zeros of this function are critical points of
# the maximum likelihood function. Solving this system
# for theta (which contains a and b) gives the MLE for a and b
# given `n`, `s1` and `s2`. `s1` is the sum of the logs of the data,
# and `s2` is the sum of the logs of 1 - data. `n` is the number
# of data points.
a, b = theta
psiab = sc.psi(a + b)
func = [s1 - n * (-psiab + sc.psi(a)),
s2 - n * (-psiab + sc.psi(b))]
return func
class beta_gen(rv_continuous):
"""A beta continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `beta` is::
gamma(a+b) * x**(a-1) * (1-x)**(b-1)
beta.pdf(x, a, b) = ------------------------------------
gamma(a)*gamma(b)
for ``0 < x < 1``, ``a > 0``, ``b > 0``, where ``gamma(z)`` is the gamma
function (`scipy.special.gamma`).
`beta` takes ``a`` and ``b`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, a, b):
return self._random_state.beta(a, b, self._size)
def _pdf(self, x, a, b):
return np.exp(self._logpdf(x, a, b))
def _logpdf(self, x, a, b):
lPx = sc.xlog1py(b - 1.0, -x) + sc.xlogy(a - 1.0, x)
lPx -= sc.betaln(a, b)
return lPx
def _cdf(self, x, a, b):
return sc.btdtr(a, b, x)
def _ppf(self, q, a, b):
return sc.btdtri(a, b, q)
def _stats(self, a, b):
mn = a*1.0 / (a + b)
var = (a*b*1.0)/(a+b+1.0)/(a+b)**2.0
g1 = 2.0*(b-a)*np.sqrt((1.0+a+b)/(a*b)) / (2+a+b)
g2 = 6.0*(a**3 + a**2*(1-2*b) + b**2*(1+b) - 2*a*b*(2+b))
g2 /= a*b*(a+b+2)*(a+b+3)
return mn, var, g1, g2
def _fitstart(self, data):
g1 = _skew(data)
g2 = _kurtosis(data)
def func(x):
a, b = x
sk = 2*(b-a)*np.sqrt(a + b + 1) / (a + b + 2) / np.sqrt(a*b)
ku = a**3 - a**2*(2*b-1) + b**2*(b+1) - 2*a*b*(b+2)
ku /= a*b*(a+b+2)*(a+b+3)
ku *= 6
return [sk-g1, ku-g2]
a, b = optimize.fsolve(func, (1.0, 1.0))
return super(beta_gen, self)._fitstart(data, args=(a, b))
@inherit_docstring_from(rv_continuous)
def fit(self, data, *args, **kwds):
"""%(super)s
In the special case where both `floc` and `fscale` are given, a
`ValueError` is raised if any value `x` in `data` does not satisfy
`floc < x < floc + fscale`.
"""
# Override rv_continuous.fit, so we can more efficiently handle the
# case where floc and fscale are given.
f0 = (kwds.get('f0', None) or kwds.get('fa', None) or
kwds.get('fix_a', None))
f1 = (kwds.get('f1', None) or kwds.get('fb', None) or
kwds.get('fix_b', None))
floc = kwds.get('floc', None)
fscale = kwds.get('fscale', None)
if floc is None or fscale is None:
# do general fit
return super(beta_gen, self).fit(data, *args, **kwds)
if f0 is not None and f1 is not None:
# This check is for consistency with `rv_continuous.fit`.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
# Special case: loc and scale are constrained, so we are fitting
# just the shape parameters. This can be done much more efficiently
# than the method used in `rv_continuous.fit`. (See the subsection
# "Two unknown parameters" in the section "Maximum likelihood" of
# the Wikipedia article on the Beta distribution for the formulas.)
# Normalize the data to the interval [0, 1].
data = (np.ravel(data) - floc) / fscale
if np.any(data <= 0) or np.any(data >= 1):
raise FitDataError("beta", lower=floc, upper=floc + fscale)
xbar = data.mean()
if f0 is not None or f1 is not None:
# One of the shape parameters is fixed.
if f0 is not None:
# The shape parameter a is fixed, so swap the parameters
# and flip the data. We always solve for `a`. The result
# will be swapped back before returning.
b = f0
data = 1 - data
xbar = 1 - xbar
else:
b = f1
# Initial guess for a. Use the formula for the mean of the beta
# distribution, E[x] = a / (a + b), to generate a reasonable
# starting point based on the mean of the data and the given
# value of b.
a = b * xbar / (1 - xbar)
# Compute the MLE for `a` by solving _beta_mle_a.
theta, info, ier, mesg = optimize.fsolve(
_beta_mle_a, a,
args=(b, len(data), np.log(data).sum()),
full_output=True
)
if ier != 1:
raise FitSolverError(mesg=mesg)
a = theta[0]
if f0 is not None:
# The shape parameter a was fixed, so swap back the
# parameters.
a, b = b, a
else:
# Neither of the shape parameters is fixed.
# s1 and s2 are used in the extra arguments passed to _beta_mle_ab
# by optimize.fsolve.
s1 = np.log(data).sum()
s2 = sc.log1p(-data).sum()
# Use the "method of moments" to estimate the initial
# guess for a and b.
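# With m = E[X] = a/(a+b) and v = Var[X] = m*(1-m)/(a+b+1), solving
# for the parameters gives a+b = m*(1-m)/v - 1 (the `fac` below),
# so a = m*fac and b = (1-m)*fac.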
fac = xbar * (1 - xbar) / data.var(ddof=0) - 1
a = xbar * fac
b = (1 - xbar) * fac
# Compute the MLE for a and b by solving _beta_mle_ab.
theta, info, ier, mesg = optimize.fsolve(
_beta_mle_ab, [a, b],
args=(len(data), s1, s2),
full_output=True
)
if ier != 1:
raise FitSolverError(mesg=mesg)
a, b = theta
return a, b, floc, fscale
beta = beta_gen(a=0.0, b=1.0, name='beta')
class betaprime_gen(rv_continuous):
"""A beta prime continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `betaprime` is::
betaprime.pdf(x, a, b) = x**(a-1) * (1+x)**(-a-b) / beta(a, b)
for ``x > 0``, ``a > 0``, ``b > 0``, where ``beta(a, b)`` is the beta
function (see `scipy.special.beta`).
`betaprime` takes ``a`` and ``b`` as shape parameters.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, a, b):
sz, rndm = self._size, self._random_state
u1 = gamma.rvs(a, size=sz, random_state=rndm)
u2 = gamma.rvs(b, size=sz, random_state=rndm)
return u1 / u2
def _pdf(self, x, a, b):
return np.exp(self._logpdf(x, a, b))
def _logpdf(self, x, a, b):
return sc.xlogy(a - 1.0, x) - sc.xlog1py(a + b, x) - sc.betaln(a, b)
def _cdf(self, x, a, b):
return sc.betainc(a, b, x/(1.+x))
def _munp(self, n, a, b):
if n == 1.0:
return np.where(b > 1,
a/(b-1.0),
np.inf)
elif n == 2.0:
return np.where(b > 2,
a*(a+1.0)/((b-2.0)*(b-1.0)),
np.inf)
elif n == 3.0:
return np.where(b > 3,
a*(a+1.0)*(a+2.0)/((b-3.0)*(b-2.0)*(b-1.0)),
np.inf)
elif n == 4.0:
return np.where(b > 4,
(a*(a + 1.0)*(a + 2.0)*(a + 3.0) /
((b - 4.0)*(b - 3.0)*(b - 2.0)*(b - 1.0))),
np.inf)
else:
raise NotImplementedError
betaprime = betaprime_gen(a=0.0, name='betaprime')
class bradford_gen(rv_continuous):
"""A Bradford continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `bradford` is::
bradford.pdf(x, c) = c / (k * (1+c*x)),
for ``0 < x < 1``, ``c > 0`` and ``k = log(1+c)``.
`bradford` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
return c / (c*x + 1.0) / sc.log1p(c)
def _cdf(self, x, c):
return sc.log1p(c*x) / sc.log1p(c)
def _ppf(self, q, c):
return sc.expm1(q * sc.log1p(c)) / c
def _stats(self, c, moments='mv'):
k = np.log(1.0+c)
mu = (c-k)/(c*k)
mu2 = ((c+2.0)*k-2.0*c)/(2*c*k*k)
g1 = None
g2 = None
if 's' in moments:
g1 = np.sqrt(2)*(12*c*c-9*c*k*(c+2)+2*k*k*(c*(c+3)+3))
g1 /= np.sqrt(c*(c*(k-2)+2*k))*(3*c*(k-2)+6*k)
if 'k' in moments:
g2 = (c**3*(k-3)*(k*(3*k-16)+24)+12*k*c*c*(k-4)*(k-3)
+ 6*c*k*k*(3*k-14) + 12*k**3)
g2 /= 3*c*(c*(k-2)+2*k)**2
return mu, mu2, g1, g2
def _entropy(self, c):
k = np.log(1+c)
return k/2.0 - np.log(c/k)
bradford = bradford_gen(a=0.0, b=1.0, name='bradford')
class burr_gen(rv_continuous):
"""A Burr (Type III) continuous random variable.
%(before_notes)s
See Also
--------
fisk : a special case of either ``burr`` or ``burr12`` with ``d = 1``
burr12 : Burr Type XII distribution
Notes
-----
The probability density function for `burr` is::
burr.pdf(x, c, d) = c * d * x**(-c-1) * (1+x**(-c))**(-d-1)
for ``x > 0``.
`burr` takes ``c`` and ``d`` as shape parameters.
This is the PDF corresponding to the third CDF given in Burr's list;
specifically, it is equation (11) in Burr's paper [1]_.
%(after_notes)s
References
----------
.. [1] Burr, I. W. "Cumulative frequency functions", Annals of
Mathematical Statistics, 13(2), pp 215-232 (1942).
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, c, d):
return c * d * (x**(-c - 1.0)) * ((1 + x**(-c))**(-d - 1.0))
def _cdf(self, x, c, d):
return (1 + x**(-c))**(-d)
def _ppf(self, q, c, d):
return (q**(-1.0/d) - 1)**(-1.0/c)
def _munp(self, n, c, d):
nc = 1. * n / c
return d * sc.beta(1.0 - nc, d + nc)
burr = burr_gen(a=0.0, name='burr')
class burr12_gen(rv_continuous):
"""A Burr (Type XII) continuous random variable.
%(before_notes)s
See Also
--------
fisk : a special case of either ``burr`` or ``burr12`` with ``d = 1``
burr : Burr Type III distribution
Notes
-----
The probability density function for `burr12` is::
burr12.pdf(x, c, d) = c * d * x**(c-1) * (1+x**(c))**(-d-1)
for ``x > 0``.
`burr12` takes ``c`` and ``d`` as shape parameters.
This is the PDF corresponding to the twelfth CDF given in Burr's list;
specifically, it is equation (20) in Burr's paper [1]_.
%(after_notes)s
The Burr type 12 distribution is also sometimes referred to as
the Singh-Maddala distribution from NIST [2]_.
References
----------
.. [1] Burr, I. W. "Cumulative frequency functions", Annals of
Mathematical Statistics, 13(2), pp 215-232 (1942).
.. [2] http://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/b12pdf.htm
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, c, d):
return np.exp(self._logpdf(x, c, d))
def _logpdf(self, x, c, d):
return np.log(c) + np.log(d) + sc.xlogy(c - 1, x) + sc.xlog1py(-d-1, x**c)
def _cdf(self, x, c, d):
return -sc.expm1(self._logsf(x, c, d))
def _logcdf(self, x, c, d):
return sc.log1p(-(1 + x**c)**(-d))
def _sf(self, x, c, d):
return np.exp(self._logsf(x, c, d))
def _logsf(self, x, c, d):
return sc.xlog1py(-d, x**c)
def _ppf(self, q, c, d):
# The following is an implementation of
# ((1 - q)**(-1.0/d) - 1)**(1.0/c)
# that does a better job handling small values of q.
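# since (1 - q)**(-1/d) - 1 == expm1(-(1/d)*log1p(-q)), this form
# avoids catastrophic cancellation as q -> 0.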
return sc.expm1(-1/d * sc.log1p(-q))**(1/c)
def _munp(self, n, c, d):
nc = 1. * n / c
return d * sc.beta(1.0 + nc, d - nc)
burr12 = burr12_gen(a=0.0, name='burr12')
class fisk_gen(burr_gen):
"""A Fisk continuous random variable.
The Fisk distribution is also known as the log-logistic distribution, and
equals the Burr distribution with ``d == 1``.
`fisk` takes ``c`` as a shape parameter.
%(before_notes)s
Notes
-----
The probability density function for `fisk` is::
fisk.pdf(x, c) = c * x**(-c-1) * (1 + x**(-c))**(-2)
for ``x > 0``.
`fisk` takes ``c`` as a shape parameter.
%(after_notes)s
See Also
--------
burr
%(example)s
"""
def _pdf(self, x, c):
return burr_gen._pdf(self, x, c, 1.0)
def _cdf(self, x, c):
return burr_gen._cdf(self, x, c, 1.0)
def _ppf(self, x, c):
return burr_gen._ppf(self, x, c, 1.0)
def _munp(self, n, c):
return burr_gen._munp(self, n, c, 1.0)
def _entropy(self, c):
return 2 - np.log(c)
fisk = fisk_gen(a=0.0, name='fisk')
# median = loc
class cauchy_gen(rv_continuous):
"""A Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `cauchy` is::
cauchy.pdf(x) = 1 / (pi * (1 + x**2))
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
return 1.0/np.pi/(1.0+x*x)
def _cdf(self, x):
return 0.5 + 1.0/np.pi*np.arctan(x)
def _ppf(self, q):
return np.tan(np.pi*q-np.pi/2.0)
def _sf(self, x):
return 0.5 - 1.0/np.pi*np.arctan(x)
def _isf(self, q):
return np.tan(np.pi/2.0-np.pi*q)
def _stats(self):
return np.nan, np.nan, np.nan, np.nan
def _entropy(self):
return np.log(4*np.pi)
def _fitstart(self, data, args=None):
# Initialize ML guesses using quartiles instead of moments.
p25, p50, p75 = np.percentile(data, [25, 50, 75])
return p50, (p75 - p25)/2
cauchy = cauchy_gen(name='cauchy')
class chi_gen(rv_continuous):
"""A chi continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `chi` is::
chi.pdf(x, df) = x**(df-1) * exp(-x**2/2) / (2**(df/2-1) * gamma(df/2))
for ``x > 0``.
Special cases of `chi` are:
- ``chi(1, loc, scale)`` is equivalent to `halfnorm`
- ``chi(2, 0, scale)`` is equivalent to `rayleigh`
- ``chi(3, 0, scale)`` is equivalent to `maxwell`
`chi` takes ``df`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, df):
sz, rndm = self._size, self._random_state
return np.sqrt(chi2.rvs(df, size=sz, random_state=rndm))
def _pdf(self, x, df):
return np.exp(self._logpdf(x, df))
def _logpdf(self, x, df):
l = np.log(2) - .5*np.log(2)*df - sc.gammaln(.5*df)
return l + sc.xlogy(df - 1., x) - .5*x**2
def _cdf(self, x, df):
return sc.gammainc(.5*df, .5*x**2)
def _ppf(self, q, df):
return np.sqrt(2*sc.gammaincinv(.5*df, q))
def _stats(self, df):
mu = np.sqrt(2)*sc.gamma(df/2.0+0.5)/sc.gamma(df/2.0)
mu2 = df - mu*mu
g1 = (2*mu**3.0 + mu*(1-2*df))/np.asarray(np.power(mu2, 1.5))
g2 = 2*df*(1.0-df)-6*mu**4 + 4*mu**2 * (2*df-1)
g2 /= np.asarray(mu2**2.0)
return mu, mu2, g1, g2
chi = chi_gen(a=0.0, name='chi')
## Chi-squared (gamma-distributed with loc=0 and scale=2 and shape=df/2)
class chi2_gen(rv_continuous):
"""A chi-squared continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `chi2` is::
chi2.pdf(x, df) = 1 / (2*gamma(df/2)) * (x/2)**(df/2-1) * exp(-x/2)
`chi2` takes ``df`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, df):
return self._random_state.chisquare(df, self._size)
def _pdf(self, x, df):
return np.exp(self._logpdf(x, df))
def _logpdf(self, x, df):
return sc.xlogy(df/2.-1, x) - x/2. - sc.gammaln(df/2.) - (np.log(2)*df)/2.
def _cdf(self, x, df):
return sc.chdtr(df, x)
def _sf(self, x, df):
return sc.chdtrc(df, x)
def _isf(self, p, df):
return sc.chdtri(df, p)
def _ppf(self, p, df):
return self._isf(1.0-p, df)
def _stats(self, df):
mu = df
mu2 = 2*df
g1 = 2*np.sqrt(2.0/df)
g2 = 12.0/df
return mu, mu2, g1, g2
chi2 = chi2_gen(a=0.0, name='chi2')
class cosine_gen(rv_continuous):
"""A cosine continuous random variable.
%(before_notes)s
Notes
-----
The cosine distribution is an approximation to the normal distribution.
The probability density function for `cosine` is::
cosine.pdf(x) = 1/(2*pi) * (1+cos(x))
for ``-pi <= x <= pi``.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
return 1.0/2/np.pi*(1+np.cos(x))
def _cdf(self, x):
return 1.0/2/np.pi*(np.pi + x + np.sin(x))
def _stats(self):
return 0.0, np.pi*np.pi/3.0-2.0, 0.0, -6.0*(np.pi**4-90)/(5.0*(np.pi*np.pi-6)**2)
def _entropy(self):
return np.log(4*np.pi)-1.0
cosine = cosine_gen(a=-np.pi, b=np.pi, name='cosine')
class dgamma_gen(rv_continuous):
"""A double gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `dgamma` is::
dgamma.pdf(x, a) = 1 / (2*gamma(a)) * abs(x)**(a-1) * exp(-abs(x))
for ``a > 0``.
`dgamma` takes ``a`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, a):
sz, rndm = self._size, self._random_state
u = rndm.random_sample(size=sz)
gm = gamma.rvs(a, size=sz, random_state=rndm)
return gm * np.where(u >= 0.5, 1, -1)
def _pdf(self, x, a):
ax = abs(x)
return 1.0/(2*sc.gamma(a))*ax**(a-1.0) * np.exp(-ax)
def _logpdf(self, x, a):
ax = abs(x)
return sc.xlogy(a - 1.0, ax) - ax - np.log(2) - sc.gammaln(a)
def _cdf(self, x, a):
fac = 0.5*sc.gammainc(a, abs(x))
return np.where(x > 0, 0.5 + fac, 0.5 - fac)
def _sf(self, x, a):
fac = 0.5*sc.gammainc(a, abs(x))
return np.where(x > 0, 0.5-fac, 0.5+fac)
def _ppf(self, q, a):
fac = sc.gammainccinv(a, 1-abs(2*q-1))
return np.where(q > 0.5, fac, -fac)
def _stats(self, a):
mu2 = a*(a+1.0)
return 0.0, mu2, 0.0, (a+2.0)*(a+3.0)/mu2-3.0
dgamma = dgamma_gen(name='dgamma')
class dweibull_gen(rv_continuous):
"""A double Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `dweibull` is::
dweibull.pdf(x, c) = c / 2 * abs(x)**(c-1) * exp(-abs(x)**c)
`dweibull` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, c):
sz, rndm = self._size, self._random_state
u = rndm.random_sample(size=sz)
w = weibull_min.rvs(c, size=sz, random_state=rndm)
return w * (np.where(u >= 0.5, 1, -1))
def _pdf(self, x, c):
ax = abs(x)
Px = c / 2.0 * ax**(c-1.0) * np.exp(-ax**c)
return Px
def _logpdf(self, x, c):
ax = abs(x)
return np.log(c) - np.log(2.0) + sc.xlogy(c - 1.0, ax) - ax**c
def _cdf(self, x, c):
Cx1 = 0.5 * np.exp(-abs(x)**c)
return np.where(x > 0, 1 - Cx1, Cx1)
def _ppf(self, q, c):
fac = 2. * np.where(q <= 0.5, q, 1. - q)
fac = np.power(-np.log(fac), 1.0 / c)
return np.where(q > 0.5, fac, -fac)
def _munp(self, n, c):
return (1 - (n % 2)) * sc.gamma(1.0 + 1.0 * n / c)
# since we know that all odd moments are zeros, return them at once.
# returning Nones from _stats makes the public stats call _munp
# so overall we're saving one or two gamma function evaluations here.
def _stats(self, c):
return 0, None, 0, None
dweibull = dweibull_gen(name='dweibull')
## Exponential (gamma distributed with a=1.0, loc=loc and scale=scale)
class expon_gen(rv_continuous):
"""An exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `expon` is::
expon.pdf(x) = exp(-x)
for ``x >= 0``.
%(after_notes)s
A common parameterization for `expon` is in terms of the rate parameter
``lambda``, such that ``pdf = lambda * exp(-lambda * x)``. This
parameterization corresponds to using ``scale = 1 / lambda``.
%(example)s
"""
def _rvs(self):
return self._random_state.standard_exponential(self._size)
def _pdf(self, x):
return np.exp(-x)
def _logpdf(self, x):
return -x
def _cdf(self, x):
return -sc.expm1(-x)
def _ppf(self, q):
return -sc.log1p(-q)
def _sf(self, x):
return np.exp(-x)
def _logsf(self, x):
return -x
def _isf(self, q):
return -np.log(q)
def _stats(self):
return 1.0, 1.0, 2.0, 6.0
def _entropy(self):
return 1.0
expon = expon_gen(a=0.0, name='expon')
## Exponentially Modified Normal (exponential distribution
## convolved with a Normal).
## This is called an exponentially modified gaussian on wikipedia
class exponnorm_gen(rv_continuous):
"""An exponentially modified Normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponnorm` is::
exponnorm.pdf(x, K) =
1/(2*K) exp(1/(2 * K**2)) exp(-x / K) * erfc(-(x - 1/K) / sqrt(2))
where the shape parameter ``K > 0``.
It can be thought of as the sum of a normally distributed random
value with mean ``loc`` and sigma ``scale`` and an exponentially
distributed random number with a pdf proportional to ``exp(-lambda * x)``
where ``lambda = (K * scale)**(-1)``.
%(after_notes)s
An alternative parameterization of this distribution (for example, in
`Wikipedia <http://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution>`_)
involves three parameters, :math:`\mu`, :math:`\lambda` and :math:`\sigma`.
In the present parameterization this corresponds to having ``loc`` and
``scale`` equal to :math:`\mu` and :math:`\sigma`, respectively, and
shape parameter :math:`K = 1/\sigma\lambda`.
.. versionadded:: 0.16.0
%(example)s
"""
def _rvs(self, K):
expval = self._random_state.standard_exponential(self._size) * K
gval = self._random_state.standard_normal(self._size)
return expval + gval
def _pdf(self, x, K):
invK = 1.0 / K
exparg = 0.5 * invK**2 - invK * x
# Avoid overflows; setting np.exp(exparg) to the max float works
# all right here
expval = _lazywhere(exparg < _LOGXMAX, (exparg,), np.exp, _XMAX)
return 0.5 * invK * expval * sc.erfc(-(x - invK) / np.sqrt(2))
def _logpdf(self, x, K):
invK = 1.0 / K
exparg = 0.5 * invK**2 - invK * x
return exparg + np.log(0.5 * invK * sc.erfc(-(x - invK) / np.sqrt(2)))
def _cdf(self, x, K):
invK = 1.0 / K
expval = invK * (0.5 * invK - x)
return _norm_cdf(x) - np.exp(expval) * _norm_cdf(x - invK)
def _sf(self, x, K):
invK = 1.0 / K
expval = invK * (0.5 * invK - x)
return _norm_cdf(-x) + np.exp(expval) * _norm_cdf(x - invK)
def _stats(self, K):
K2 = K * K
opK2 = 1.0 + K2
skw = 2 * K**3 * opK2**(-1.5)
krt = 6.0 * K2 * K2 * opK2**(-2)
return K, opK2, skw, krt
exponnorm = exponnorm_gen(name='exponnorm')
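# A hedged sketch (not part of the upstream module) of the (mu, lambda, sigma)
# parameterization described in the docstring above, mapped onto
# (K, loc, scale):
#
#     >>> import numpy as np
#     >>> from scipy.stats import exponnorm
#     >>> mu, sigma, lam = 1.0, 2.0, 0.25
#     >>> rv = exponnorm(1.0 / (sigma * lam), loc=mu, scale=sigma)
#     >>> np.isclose(rv.mean(), mu + 1.0 / lam)   # EMG mean is mu + 1/lambda
#     True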
class exponweib_gen(rv_continuous):
"""An exponentiated Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponweib` is::
exponweib.pdf(x, a, c) =
a * c * (1-exp(-x**c))**(a-1) * exp(-x**c)*x**(c-1)
for ``x > 0``, ``a > 0``, ``c > 0``.
`exponweib` takes ``a`` and ``c`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, a, c):
return np.exp(self._logpdf(x, a, c))
def _logpdf(self, x, a, c):
negxc = -x**c
exm1c = -sc.expm1(negxc)
logp = (np.log(a) + np.log(c) + sc.xlogy(a - 1.0, exm1c) +
negxc + sc.xlogy(c - 1.0, x))
return logp
def _cdf(self, x, a, c):
exm1c = -sc.expm1(-x**c)
return exm1c**a
def _ppf(self, q, a, c):
return (-sc.log1p(-q**(1.0/a)))**np.asarray(1.0/c)
exponweib = exponweib_gen(a=0.0, name='exponweib')
class exponpow_gen(rv_continuous):
"""An exponential power continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponpow` is::
exponpow.pdf(x, b) = b * x**(b-1) * exp(1 + x**b - exp(x**b))
for ``x >= 0``, ``b > 0``. Note that this is a different distribution
from the exponential power distribution that is also known under the names
"generalized normal" or "generalized Gaussian".
`exponpow` takes ``b`` as a shape parameter.
%(after_notes)s
References
----------
http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Exponentialpower.pdf
%(example)s
"""
def _pdf(self, x, b):
return np.exp(self._logpdf(x, b))
def _logpdf(self, x, b):
xb = x**b
f = 1 + np.log(b) + sc.xlogy(b - 1.0, x) + xb - np.exp(xb)
return f
def _cdf(self, x, b):
return -sc.expm1(-sc.expm1(x**b))
def _sf(self, x, b):
return np.exp(-sc.expm1(x**b))
def _isf(self, x, b):
return (sc.log1p(-np.log(x)))**(1./b)
def _ppf(self, q, b):
return pow(sc.log1p(-sc.log1p(-q)), 1.0/b)
exponpow = exponpow_gen(a=0.0, name='exponpow')
class fatiguelife_gen(rv_continuous):
"""A fatigue-life (Birnbaum-Saunders) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `fatiguelife` is::
fatiguelife.pdf(x, c) =
(x+1) / (2*c*sqrt(2*pi*x**3)) * exp(-(x-1)**2/(2*x*c**2))
for ``x > 0``.
`fatiguelife` takes ``c`` as a shape parameter.
%(after_notes)s
References
----------
.. [1] "Birnbaum-Saunders distribution",
http://en.wikipedia.org/wiki/Birnbaum-Saunders_distribution
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, c):
z = self._random_state.standard_normal(self._size)
x = 0.5*c*z
x2 = x*x
t = 1.0 + 2*x2 + 2*x*np.sqrt(1 + x2)
return t
def _pdf(self, x, c):
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return (np.log(x+1) - (x-1)**2 / (2.0*x*c**2) - np.log(2*c) -
0.5*(np.log(2*np.pi) + 3*np.log(x)))
def _cdf(self, x, c):
return _norm_cdf(1.0 / c * (np.sqrt(x) - 1.0/np.sqrt(x)))
def _ppf(self, q, c):
tmp = c*sc.ndtri(q)
return 0.25 * (tmp + np.sqrt(tmp**2 + 4))**2
def _stats(self, c):
        # NB: the formula for kurtosis in wikipedia seems to have an error:
        # it's 40, not 41. At least it disagrees with the one from Wolfram
        # Alpha. The latter one, below, passes the tests, while the wiki
        # one doesn't. So far I didn't have the guts to actually check the
        # coefficients from the expressions for the raw moments.
c2 = c*c
mu = c2 / 2.0 + 1.0
den = 5.0 * c2 + 4.0
mu2 = c2*den / 4.0
g1 = 4 * c * (11*c2 + 6.0) / np.power(den, 1.5)
g2 = 6 * c2 * (93*c2 + 40.0) / den**2.0
return mu, mu2, g1, g2
fatiguelife = fatiguelife_gen(a=0.0, name='fatiguelife')
class foldcauchy_gen(rv_continuous):
"""A folded Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `foldcauchy` is::
foldcauchy.pdf(x, c) = 1/(pi*(1+(x-c)**2)) + 1/(pi*(1+(x+c)**2))
for ``x >= 0``.
`foldcauchy` takes ``c`` as a shape parameter.
%(example)s
"""
def _rvs(self, c):
return abs(cauchy.rvs(loc=c, size=self._size,
random_state=self._random_state))
def _pdf(self, x, c):
return 1.0/np.pi*(1.0/(1+(x-c)**2) + 1.0/(1+(x+c)**2))
def _cdf(self, x, c):
return 1.0/np.pi*(np.arctan(x-c) + np.arctan(x+c))
def _stats(self, c):
return np.inf, np.inf, np.nan, np.nan
foldcauchy = foldcauchy_gen(a=0.0, name='foldcauchy')
class f_gen(rv_continuous):
"""An F continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `f` is::
df2**(df2/2) * df1**(df1/2) * x**(df1/2-1)
F.pdf(x, df1, df2) = --------------------------------------------
(df2+df1*x)**((df1+df2)/2) * B(df1/2, df2/2)
for ``x > 0``.
`f` takes ``dfn`` and ``dfd`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, dfn, dfd):
return self._random_state.f(dfn, dfd, self._size)
def _pdf(self, x, dfn, dfd):
return np.exp(self._logpdf(x, dfn, dfd))
def _logpdf(self, x, dfn, dfd):
n = 1.0 * dfn
m = 1.0 * dfd
lPx = m/2 * np.log(m) + n/2 * np.log(n) + (n/2 - 1) * np.log(x)
lPx -= ((n+m)/2) * np.log(m + n*x) + sc.betaln(n/2, m/2)
return lPx
def _cdf(self, x, dfn, dfd):
return sc.fdtr(dfn, dfd, x)
def _sf(self, x, dfn, dfd):
return sc.fdtrc(dfn, dfd, x)
def _ppf(self, q, dfn, dfd):
return sc.fdtri(dfn, dfd, q)
def _stats(self, dfn, dfd):
v1, v2 = 1. * dfn, 1. * dfd
v2_2, v2_4, v2_6, v2_8 = v2 - 2., v2 - 4., v2 - 6., v2 - 8.
mu = _lazywhere(
v2 > 2, (v2, v2_2),
lambda v2, v2_2: v2 / v2_2,
np.inf)
mu2 = _lazywhere(
v2 > 4, (v1, v2, v2_2, v2_4),
lambda v1, v2, v2_2, v2_4:
2 * v2 * v2 * (v1 + v2_2) / (v1 * v2_2**2 * v2_4),
np.inf)
g1 = _lazywhere(
v2 > 6, (v1, v2_2, v2_4, v2_6),
lambda v1, v2_2, v2_4, v2_6:
(2 * v1 + v2_2) / v2_6 * np.sqrt(v2_4 / (v1 * (v1 + v2_2))),
np.nan)
g1 *= np.sqrt(8.)
g2 = _lazywhere(
v2 > 8, (g1, v2_6, v2_8),
lambda g1, v2_6, v2_8: (8 + g1 * g1 * v2_6) / v2_8,
np.nan)
g2 *= 3. / 2.
return mu, mu2, g1, g2
f = f_gen(a=0.0, name='f')
## Folded Normal
## abs(Z) where (Z is normal with mu=L and std=S so that c=abs(L)/S)
##
## note: regress docs have the scale parameter correct, but the first
## parameter given there is actually a shape parameter, A = c * scale.
## Half-normal is folded normal with shape-parameter c=0.
class foldnorm_gen(rv_continuous):
"""A folded normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `foldnorm` is::
        foldnorm.pdf(x, c) = sqrt(2/pi) * cosh(c*x) * exp(-(x**2+c**2)/2)
for ``c >= 0``.
`foldnorm` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
return c >= 0
def _rvs(self, c):
return abs(self._random_state.standard_normal(self._size) + c)
def _pdf(self, x, c):
return _norm_pdf(x + c) + _norm_pdf(x-c)
def _cdf(self, x, c):
return _norm_cdf(x-c) + _norm_cdf(x+c) - 1.0
def _stats(self, c):
# Regina C. Elandt, Technometrics 3, 551 (1961)
# http://www.jstor.org/stable/1266561
#
c2 = c*c
expfac = np.exp(-0.5*c2) / np.sqrt(2.*np.pi)
mu = 2.*expfac + c * sc.erf(c/np.sqrt(2))
mu2 = c2 + 1 - mu*mu
g1 = 2. * (mu*mu*mu - c2*mu - expfac)
g1 /= np.power(mu2, 1.5)
g2 = c2 * (c2 + 6.) + 3 + 8.*expfac*mu
g2 += (2. * (c2 - 3.) - 3. * mu**2) * mu**2
g2 = g2 / mu2**2.0 - 3.
return mu, mu2, g1, g2
foldnorm = foldnorm_gen(a=0.0, name='foldnorm')
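# A small consistency sketch (not part of the upstream module): as noted in
# the comments above, the half-normal is the folded normal with c = 0.
#
#     >>> import numpy as np
#     >>> from scipy.stats import foldnorm, halfnorm
#     >>> x = np.linspace(0, 3, 7)
#     >>> np.allclose(foldnorm.pdf(x, 0.0), halfnorm.pdf(x))
#     True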
## Extreme Value Type II or Frechet
## (defined in Regress+ documentation as Extreme LB) as
## a limiting value distribution.
##
class frechet_r_gen(rv_continuous):
"""A Frechet right (or Weibull minimum) continuous random variable.
%(before_notes)s
See Also
--------
weibull_min : The same distribution as `frechet_r`.
frechet_l, weibull_max
Notes
-----
The probability density function for `frechet_r` is::
frechet_r.pdf(x, c) = c * x**(c-1) * exp(-x**c)
for ``x > 0``, ``c > 0``.
`frechet_r` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
return c*pow(x, c-1)*np.exp(-pow(x, c))
def _logpdf(self, x, c):
return np.log(c) + sc.xlogy(c - 1, x) - pow(x, c)
def _cdf(self, x, c):
return -sc.expm1(-pow(x, c))
def _sf(self, x, c):
return np.exp(-pow(x, c))
def _logsf(self, x, c):
return -pow(x, c)
def _ppf(self, q, c):
return pow(-sc.log1p(-q), 1.0/c)
def _munp(self, n, c):
return sc.gamma(1.0+n*1.0/c)
def _entropy(self, c):
return -_EULER / c - np.log(c) + _EULER + 1
frechet_r = frechet_r_gen(a=0.0, name='frechet_r')
weibull_min = frechet_r_gen(a=0.0, name='weibull_min')
class frechet_l_gen(rv_continuous):
"""A Frechet left (or Weibull maximum) continuous random variable.
%(before_notes)s
See Also
--------
weibull_max : The same distribution as `frechet_l`.
frechet_r, weibull_min
Notes
-----
The probability density function for `frechet_l` is::
frechet_l.pdf(x, c) = c * (-x)**(c-1) * exp(-(-x)**c)
for ``x < 0``, ``c > 0``.
`frechet_l` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
return c*pow(-x, c-1)*np.exp(-pow(-x, c))
def _logpdf(self, x, c):
return np.log(c) + sc.xlogy(c-1, -x) - pow(-x, c)
def _cdf(self, x, c):
return np.exp(-pow(-x, c))
def _logcdf(self, x, c):
return -pow(-x, c)
def _sf(self, x, c):
return -sc.expm1(-pow(-x, c))
def _ppf(self, q, c):
return -pow(-np.log(q), 1.0/c)
def _munp(self, n, c):
val = sc.gamma(1.0+n*1.0/c)
if int(n) % 2:
sgn = -1
else:
sgn = 1
return sgn * val
def _entropy(self, c):
return -_EULER / c - np.log(c) + _EULER + 1
frechet_l = frechet_l_gen(b=0.0, name='frechet_l')
weibull_max = frechet_l_gen(b=0.0, name='weibull_max')
class genlogistic_gen(rv_continuous):
"""A generalized logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genlogistic` is::
genlogistic.pdf(x, c) = c * exp(-x) / (1 + exp(-x))**(c+1)
    for real ``x`` and ``c > 0``.
`genlogistic` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return np.log(c) - x - (c+1.0)*sc.log1p(np.exp(-x))
def _cdf(self, x, c):
Cx = (1+np.exp(-x))**(-c)
return Cx
def _ppf(self, q, c):
vals = -np.log(pow(q, -1.0/c)-1)
return vals
def _stats(self, c):
mu = _EULER + sc.psi(c)
mu2 = np.pi*np.pi/6.0 + sc.zeta(2, c)
g1 = -2*sc.zeta(3, c) + 2*_ZETA3
g1 /= np.power(mu2, 1.5)
g2 = np.pi**4/15.0 + 6*sc.zeta(4, c)
g2 /= mu2**2.0
return mu, mu2, g1, g2
genlogistic = genlogistic_gen(name='genlogistic')
class genpareto_gen(rv_continuous):
"""A generalized Pareto continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genpareto` is::
genpareto.pdf(x, c) = (1 + c * x)**(-1 - 1/c)
    defined for ``x >= 0`` if ``c >= 0``, and for
``0 <= x <= -1/c`` if ``c < 0``.
`genpareto` takes ``c`` as a shape parameter.
For ``c == 0``, `genpareto` reduces to the exponential
distribution, `expon`::
genpareto.pdf(x, c=0) = exp(-x)
For ``c == -1``, `genpareto` is uniform on ``[0, 1]``::
genpareto.cdf(x, c=-1) = x
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
c = np.asarray(c)
self.b = _lazywhere(c < 0, (c,),
lambda c: -1. / c,
np.inf)
return True
def _pdf(self, x, c):
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.xlog1py(c + 1., c*x) / c,
-x)
def _cdf(self, x, c):
return -sc.inv_boxcox1p(-x, -c)
def _sf(self, x, c):
return sc.inv_boxcox(-x, -c)
def _logsf(self, x, c):
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.log1p(c*x) / c,
-x)
def _ppf(self, q, c):
return -sc.boxcox1p(-q, -c)
def _isf(self, q, c):
return -sc.boxcox(q, -c)
def _munp(self, n, c):
def __munp(n, c):
val = 0.0
k = np.arange(0, n + 1)
for ki, cnk in zip(k, sc.comb(n, k)):
val = val + cnk * (-1) ** ki / (1.0 - c * ki)
return np.where(c * n < 1, val * (-1.0 / c) ** n, np.inf)
return _lazywhere(c != 0, (c,),
lambda c: __munp(n, c),
sc.gamma(n + 1))
def _entropy(self, c):
return 1. + c
genpareto = genpareto_gen(a=0.0, name='genpareto')
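# A minimal doctest-style sketch (not part of the upstream module) of the
# special cases noted in the docstring above:
#
#     >>> import numpy as np
#     >>> from scipy.stats import expon, genpareto
#     >>> x = np.linspace(0, 3, 7)
#     >>> np.allclose(genpareto.pdf(x, c=0), expon.pdf(x))   # c == 0
#     True
#     >>> np.isclose(genpareto.cdf(0.5, c=-1), 0.5)          # uniform on [0, 1]
#     True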
class genexpon_gen(rv_continuous):
"""A generalized exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genexpon` is::
genexpon.pdf(x, a, b, c) = (a + b * (1 - exp(-c*x))) * \
exp(-a*x - b*x + b/c * (1-exp(-c*x)))
for ``x >= 0``, ``a, b, c > 0``.
`genexpon` takes ``a``, ``b`` and ``c`` as shape parameters.
%(after_notes)s
References
----------
H.K. Ryu, "An Extension of Marshall and Olkin's Bivariate Exponential
Distribution", Journal of the American Statistical Association, 1993.
N. Balakrishnan, "The Exponential Distribution: Theory, Methods and
Applications", Asit P. Basu.
%(example)s
"""
def _pdf(self, x, a, b, c):
return (a + b*(-sc.expm1(-c*x)))*np.exp((-a-b)*x +
b*(-sc.expm1(-c*x))/c)
def _cdf(self, x, a, b, c):
return -sc.expm1((-a-b)*x + b*(-sc.expm1(-c*x))/c)
def _logpdf(self, x, a, b, c):
return np.log(a+b*(-sc.expm1(-c*x))) + (-a-b)*x+b*(-sc.expm1(-c*x))/c
genexpon = genexpon_gen(a=0.0, name='genexpon')
class genextreme_gen(rv_continuous):
"""A generalized extreme value continuous random variable.
%(before_notes)s
See Also
--------
gumbel_r
Notes
-----
For ``c=0``, `genextreme` is equal to `gumbel_r`.
The probability density function for `genextreme` is::
genextreme.pdf(x, c) =
exp(-exp(-x))*exp(-x), for c==0
exp(-(1-c*x)**(1/c))*(1-c*x)**(1/c-1), for x <= 1/c, c > 0
Note that several sources and software packages use the opposite
convention for the sign of the shape parameter ``c``.
`genextreme` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
self.b = np.where(c > 0, 1.0 / np.maximum(c, _XMIN), np.inf)
self.a = np.where(c < 0, 1.0 / np.minimum(c, -_XMIN), -np.inf)
return np.where(abs(c) == np.inf, 0, 1)
def _loglogcdf(self, x, c):
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: sc.log1p(-c*x)/c, -x)
def _pdf(self, x, c):
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
cx = _lazywhere((x == x) & (c != 0), (x, c), lambda x, c: c*x, 0.0)
logex2 = sc.log1p(-cx)
logpex2 = self._loglogcdf(x, c)
pex2 = np.exp(logpex2)
# Handle special cases
np.putmask(logpex2, (c == 0) & (x == -np.inf), 0.0)
logpdf = np.where((cx == 1) | (cx == -np.inf),
-np.inf,
-pex2+logpex2-logex2)
np.putmask(logpdf, (c == 1) & (x == 1), 0.0)
return logpdf
def _logcdf(self, x, c):
return -np.exp(self._loglogcdf(x, c))
def _cdf(self, x, c):
return np.exp(self._logcdf(x, c))
def _sf(self, x, c):
return -sc.expm1(self._logcdf(x, c))
def _ppf(self, q, c):
x = -np.log(-np.log(q))
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.expm1(-c * x) / c, x)
def _isf(self, q, c):
x = -np.log(-sc.log1p(-q))
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.expm1(-c * x) / c, x)
def _stats(self, c):
g = lambda n: sc.gamma(n*c + 1)
g1 = g(1)
g2 = g(2)
g3 = g(3)
g4 = g(4)
g2mg12 = np.where(abs(c) < 1e-7, (c*np.pi)**2.0/6.0, g2-g1**2.0)
gam2k = np.where(abs(c) < 1e-7, np.pi**2.0/6.0,
sc.expm1(sc.gammaln(2.0*c+1.0)-2*sc.gammaln(c + 1.0))/c**2.0)
eps = 1e-14
gamk = np.where(abs(c) < eps, -_EULER, sc.expm1(sc.gammaln(c + 1))/c)
m = np.where(c < -1.0, np.nan, -gamk)
v = np.where(c < -0.5, np.nan, g1**2.0*gam2k)
# skewness
sk1 = np.where(c < -1./3, np.nan,
np.sign(c)*(-g3+(g2+2*g2mg12)*g1)/((g2mg12)**(3./2.)))
sk = np.where(abs(c) <= eps**0.29, 12*np.sqrt(6)*_ZETA3/np.pi**3, sk1)
# kurtosis
ku1 = np.where(c < -1./4, np.nan,
(g4+(-4*g3+3*(g2+g2mg12)*g1)*g1)/((g2mg12)**2))
ku = np.where(abs(c) <= (eps)**0.23, 12.0/5.0, ku1-3.0)
return m, v, sk, ku
def _fitstart(self, data):
# This is better than the default shape of (1,).
g = _skew(data)
if g < 0:
a = 0.5
else:
a = -0.5
return super(genextreme_gen, self)._fitstart(data, args=(a,))
def _munp(self, n, c):
k = np.arange(0, n+1)
vals = 1.0/c**n * np.sum(
sc.comb(n, k) * (-1)**k * sc.gamma(c*k + 1),
axis=0)
return np.where(c*n > -1, vals, np.inf)
def _entropy(self, c):
return _EULER*(1 - c) + 1
genextreme = genextreme_gen(name='genextreme')
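# A quick consistency sketch (not part of the upstream module): for c = 0,
# `genextreme` coincides with `gumbel_r`, as noted in the docstring above.
#
#     >>> import numpy as np
#     >>> from scipy.stats import genextreme, gumbel_r
#     >>> x = np.linspace(-2, 4, 7)
#     >>> np.allclose(genextreme.pdf(x, 0.0), gumbel_r.pdf(x))
#     True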
def _digammainv(y):
# Inverse of the digamma function (real positive arguments only).
# This function is used in the `fit` method of `gamma_gen`.
# The function uses either optimize.fsolve or optimize.newton
# to solve `sc.digamma(x) - y = 0`. There is probably room for
# improvement, but currently it works over a wide range of y:
# >>> y = 64*np.random.randn(1000000)
# >>> y.min(), y.max()
# (-311.43592651416662, 351.77388222276869)
    # >>> x = [_digammainv(t) for t in y]
    # >>> np.abs(sc.digamma(x) - y).max()
# 1.1368683772161603e-13
#
_em = 0.5772156649015328606065120
func = lambda x: sc.digamma(x) - y
if y > -0.125:
x0 = np.exp(y) + 0.5
if y < 10:
            # Some experimentation shows that newton reliably converges
            # much faster than fsolve in this y range. For larger y,
            # newton sometimes fails to converge.
value = optimize.newton(func, x0, tol=1e-10)
return value
elif y > -3:
x0 = np.exp(y/2.332) + 0.08661
else:
x0 = 1.0 / (-y - _em)
value, info, ier, mesg = optimize.fsolve(func, x0, xtol=1e-11,
full_output=True)
if ier != 1:
raise RuntimeError("_digammainv: fsolve failed, y = %r" % y)
return value[0]
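# A minimal round-trip sketch (not part of the upstream module) for the
# module-private helper above:
#
#     >>> import numpy as np
#     >>> import scipy.special as sc
#     >>> x = _digammainv(-2.5)
#     >>> np.isclose(sc.digamma(x), -2.5)
#     True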
## Gamma (Use MATLAB and MATHEMATICA (b=theta=scale, a=alpha=shape) definition)
## gamma(a, loc, scale) with a an integer is the Erlang distribution
## gamma(1, loc, scale) is the Exponential distribution
## gamma(df/2, 0, 2) is the chi2 distribution with df degrees of freedom.
class gamma_gen(rv_continuous):
"""A gamma continuous random variable.
%(before_notes)s
See Also
--------
erlang, expon
Notes
-----
The probability density function for `gamma` is::
gamma.pdf(x, a) = x**(a-1) * exp(-x) / gamma(a)
for ``x >= 0``, ``a > 0``. Here ``gamma(a)`` refers to the gamma function.
`gamma` has a shape parameter `a` which needs to be set explicitly.
When ``a`` is an integer, `gamma` reduces to the Erlang
distribution, and when ``a=1`` to the exponential distribution.
%(after_notes)s
%(example)s
"""
def _rvs(self, a):
return self._random_state.standard_gamma(a, self._size)
def _pdf(self, x, a):
return np.exp(self._logpdf(x, a))
def _logpdf(self, x, a):
return sc.xlogy(a-1.0, x) - x - sc.gammaln(a)
def _cdf(self, x, a):
return sc.gammainc(a, x)
def _sf(self, x, a):
return sc.gammaincc(a, x)
def _ppf(self, q, a):
return sc.gammaincinv(a, q)
def _stats(self, a):
return a, a, 2.0/np.sqrt(a), 6.0/a
def _entropy(self, a):
return sc.psi(a)*(1-a) + a + sc.gammaln(a)
def _fitstart(self, data):
        # The skewness of the gamma distribution is `2 / np.sqrt(a)`.
# We invert that to estimate the shape `a` using the skewness
# of the data. The formula is regularized with 1e-8 in the
# denominator to allow for degenerate data where the skewness
# is close to 0.
a = 4 / (1e-8 + _skew(data)**2)
return super(gamma_gen, self)._fitstart(data, args=(a,))
@inherit_docstring_from(rv_continuous)
def fit(self, data, *args, **kwds):
f0 = (kwds.get('f0', None) or kwds.get('fa', None) or
kwds.get('fix_a', None))
floc = kwds.get('floc', None)
fscale = kwds.get('fscale', None)
if floc is None:
# loc is not fixed. Use the default fit method.
return super(gamma_gen, self).fit(data, *args, **kwds)
# Special case: loc is fixed.
if f0 is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
# Without this check, this function would just return the
# parameters that were given.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
# Fixed location is handled by shifting the data.
data = np.asarray(data)
if np.any(data <= floc):
raise FitDataError("gamma", lower=floc, upper=np.inf)
if floc != 0:
# Don't do the subtraction in-place, because `data` might be a
# view of the input array.
data = data - floc
xbar = data.mean()
# Three cases to handle:
# * shape and scale both free
# * shape fixed, scale free
# * shape free, scale fixed
if fscale is None:
# scale is free
if f0 is not None:
# shape is fixed
a = f0
else:
# shape and scale are both free.
# The MLE for the shape parameter `a` is the solution to:
                # np.log(a) - sc.digamma(a) - np.log(xbar) + np.log(data).mean() = 0
s = np.log(xbar) - np.log(data).mean()
func = lambda a: np.log(a) - sc.digamma(a) - s
aest = (3-s + np.sqrt((s-3)**2 + 24*s)) / (12*s)
xa = aest*(1-0.4)
xb = aest*(1+0.4)
a = optimize.brentq(func, xa, xb, disp=0)
# The MLE for the scale parameter is just the data mean
# divided by the shape parameter.
scale = xbar / a
else:
# scale is fixed, shape is free
# The MLE for the shape parameter `a` is the solution to:
# sc.digamma(a) - np.log(data).mean() + np.log(fscale) = 0
c = np.log(data).mean() - np.log(fscale)
a = _digammainv(c)
scale = fscale
return a, floc, scale
gamma = gamma_gen(a=0.0, name='gamma')
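# A hedged usage sketch (not part of the upstream module) of the fixed-loc
# fit path implemented above:
#
#     >>> from scipy.stats import gamma
#     >>> data = gamma.rvs(3.0, scale=2.0, size=10000, random_state=1234)
#     >>> a_hat, loc_hat, scale_hat = gamma.fit(data, floc=0)
#     >>> loc_hat == 0   # the fixed location is returned unchanged
#     True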
class erlang_gen(gamma_gen):
"""An Erlang continuous random variable.
%(before_notes)s
See Also
--------
gamma
Notes
-----
The Erlang distribution is a special case of the Gamma distribution, with
the shape parameter `a` an integer. Note that this restriction is not
enforced by `erlang`. It will, however, generate a warning the first time
a non-integer value is used for the shape parameter.
Refer to `gamma` for examples.
"""
def _argcheck(self, a):
allint = np.all(np.floor(a) == a)
allpos = np.all(a > 0)
if not allint:
# An Erlang distribution shouldn't really have a non-integer
# shape parameter, so warn the user.
warnings.warn(
'The shape parameter of the erlang distribution '
'has been given a non-integer value %r.' % (a,),
RuntimeWarning)
return allpos
def _fitstart(self, data):
        # Override gamma_gen._fitstart so that an integer initial value is
# used. (Also regularize the division, to avoid issues when
# _skew(data) is 0 or close to 0.)
a = int(4.0 / (1e-8 + _skew(data)**2))
return super(gamma_gen, self)._fitstart(data, args=(a,))
# Trivial override of the fit method, so we can monkey-patch its
# docstring.
def fit(self, data, *args, **kwds):
return super(erlang_gen, self).fit(data, *args, **kwds)
if fit.__doc__ is not None:
fit.__doc__ = (rv_continuous.fit.__doc__ +
"""
Notes
-----
The Erlang distribution is generally defined to have integer values
for the shape parameter. This is not enforced by the `erlang` class.
When fitting the distribution, it will generally return a non-integer
value for the shape parameter. By using the keyword argument
`f0=<integer>`, the fit method can be constrained to fit the data to
a specific integer shape parameter.
""")
erlang = erlang_gen(a=0.0, name='erlang')
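# A hedged usage sketch (not part of the upstream module): constraining the
# fit to an integer shape with `f0`, as described in the note above.
#
#     >>> from scipy.stats import erlang
#     >>> data = erlang.rvs(4, scale=1.5, size=5000, random_state=0)
#     >>> a_hat, loc_hat, scale_hat = erlang.fit(data, f0=4, floc=0)
#     >>> a_hat
#     4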
class gengamma_gen(rv_continuous):
"""A generalized gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gengamma` is::
gengamma.pdf(x, a, c) = abs(c) * x**(c*a-1) * exp(-x**c) / gamma(a)
for ``x >= 0``, ``a > 0``, and ``c != 0``.
`gengamma` takes ``a`` and ``c`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, c):
return (a > 0) & (c != 0)
def _pdf(self, x, a, c):
return np.exp(self._logpdf(x, a, c))
def _logpdf(self, x, a, c):
return np.log(abs(c)) + sc.xlogy(c*a - 1, x) - x**c - sc.gammaln(a)
def _cdf(self, x, a, c):
xc = x**c
val1 = sc.gammainc(a, xc)
val2 = sc.gammaincc(a, xc)
return np.where(c > 0, val1, val2)
def _sf(self, x, a, c):
xc = x**c
val1 = sc.gammainc(a, xc)
val2 = sc.gammaincc(a, xc)
return np.where(c > 0, val2, val1)
def _ppf(self, q, a, c):
val1 = sc.gammaincinv(a, q)
val2 = sc.gammainccinv(a, q)
return np.where(c > 0, val1, val2)**(1.0/c)
def _isf(self, q, a, c):
val1 = sc.gammaincinv(a, q)
val2 = sc.gammainccinv(a, q)
return np.where(c > 0, val2, val1)**(1.0/c)
def _munp(self, n, a, c):
        # Pochhammer symbol: sc.poch(a, n) = gamma(a+n)/gamma(a)
return sc.poch(a, n*1.0/c)
def _entropy(self, a, c):
val = sc.psi(a)
return a*(1-val) + 1.0/c*val + sc.gammaln(a) - np.log(abs(c))
gengamma = gengamma_gen(a=0.0, name='gengamma')
class genhalflogistic_gen(rv_continuous):
"""A generalized half-logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genhalflogistic` is::
genhalflogistic.pdf(x, c) =
2 * (1-c*x)**(1/c-1) / (1+(1-c*x)**(1/c))**2
for ``0 <= x <= 1/c``, and ``c > 0``.
`genhalflogistic` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
self.b = 1.0 / c
return c > 0
def _pdf(self, x, c):
limit = 1.0/c
tmp = np.asarray(1-c*x)
tmp0 = tmp**(limit-1)
tmp2 = tmp0*tmp
return 2*tmp0 / (1+tmp2)**2
def _cdf(self, x, c):
limit = 1.0/c
tmp = np.asarray(1-c*x)
tmp2 = tmp**(limit)
return (1.0-tmp2) / (1+tmp2)
def _ppf(self, q, c):
return 1.0/c*(1-((1.0-q)/(1.0+q))**c)
def _entropy(self, c):
return 2 - (2*c+1)*np.log(2)
genhalflogistic = genhalflogistic_gen(a=0.0, name='genhalflogistic')
class gompertz_gen(rv_continuous):
"""A Gompertz (or truncated Gumbel) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gompertz` is::
gompertz.pdf(x, c) = c * exp(x) * exp(-c*(exp(x)-1))
for ``x >= 0``, ``c > 0``.
`gompertz` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return np.log(c) + x - c * sc.expm1(x)
def _cdf(self, x, c):
return -sc.expm1(-c * sc.expm1(x))
def _ppf(self, q, c):
return sc.log1p(-1.0 / c * sc.log1p(-q))
def _entropy(self, c):
return 1.0 - np.log(c) - np.exp(c)*sc.expn(1, c)
gompertz = gompertz_gen(a=0.0, name='gompertz')
class gumbel_r_gen(rv_continuous):
"""A right-skewed Gumbel continuous random variable.
%(before_notes)s
See Also
--------
gumbel_l, gompertz, genextreme
Notes
-----
The probability density function for `gumbel_r` is::
gumbel_r.pdf(x) = exp(-(x + exp(-x)))
The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
distribution. It is also related to the extreme value distribution,
log-Weibull and Gompertz distributions.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return -x - np.exp(-x)
def _cdf(self, x):
return np.exp(-np.exp(-x))
def _logcdf(self, x):
return -np.exp(-x)
def _ppf(self, q):
return -np.log(-np.log(q))
def _stats(self):
return _EULER, np.pi*np.pi/6.0, 12*np.sqrt(6)/np.pi**3 * _ZETA3, 12.0/5
def _entropy(self):
# http://en.wikipedia.org/wiki/Gumbel_distribution
return _EULER + 1.
gumbel_r = gumbel_r_gen(name='gumbel_r')
class gumbel_l_gen(rv_continuous):
"""A left-skewed Gumbel continuous random variable.
%(before_notes)s
See Also
--------
gumbel_r, gompertz, genextreme
Notes
-----
The probability density function for `gumbel_l` is::
gumbel_l.pdf(x) = exp(x - exp(x))
The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
distribution. It is also related to the extreme value distribution,
log-Weibull and Gompertz distributions.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return x - np.exp(x)
def _cdf(self, x):
return -sc.expm1(-np.exp(x))
def _ppf(self, q):
return np.log(-sc.log1p(-q))
def _logsf(self, x):
return -np.exp(x)
def _sf(self, x):
return np.exp(-np.exp(x))
def _isf(self, x):
return np.log(-np.log(x))
def _stats(self):
return -_EULER, np.pi*np.pi/6.0, \
-12*np.sqrt(6)/np.pi**3 * _ZETA3, 12.0/5
def _entropy(self):
return _EULER + 1.
gumbel_l = gumbel_l_gen(name='gumbel_l')
class halfcauchy_gen(rv_continuous):
"""A Half-Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfcauchy` is::
halfcauchy.pdf(x) = 2 / (pi * (1 + x**2))
for ``x >= 0``.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
return 2.0/np.pi/(1.0+x*x)
def _logpdf(self, x):
return np.log(2.0/np.pi) - sc.log1p(x*x)
def _cdf(self, x):
return 2.0/np.pi*np.arctan(x)
def _ppf(self, q):
return np.tan(np.pi/2*q)
def _stats(self):
return np.inf, np.inf, np.nan, np.nan
def _entropy(self):
return np.log(2*np.pi)
halfcauchy = halfcauchy_gen(a=0.0, name='halfcauchy')
class halflogistic_gen(rv_continuous):
"""A half-logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halflogistic` is::
halflogistic.pdf(x) = 2 * exp(-x) / (1+exp(-x))**2 = 1/2 * sech(x/2)**2
for ``x >= 0``.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return np.log(2) - x - 2. * sc.log1p(np.exp(-x))
def _cdf(self, x):
return np.tanh(x/2.0)
def _ppf(self, q):
return 2*np.arctanh(q)
def _munp(self, n):
if n == 1:
return 2*np.log(2)
if n == 2:
return np.pi*np.pi/3.0
if n == 3:
return 9*_ZETA3
if n == 4:
return 7*np.pi**4 / 15.0
return 2*(1-pow(2.0, 1-n))*sc.gamma(n+1)*sc.zeta(n, 1)
def _entropy(self):
return 2-np.log(2)
halflogistic = halflogistic_gen(a=0.0, name='halflogistic')
class halfnorm_gen(rv_continuous):
"""A half-normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfnorm` is::
halfnorm.pdf(x) = sqrt(2/pi) * exp(-x**2/2)
for ``x > 0``.
`halfnorm` is a special case of `chi` with ``df == 1``.
%(after_notes)s
%(example)s
"""
def _rvs(self):
return abs(self._random_state.standard_normal(size=self._size))
def _pdf(self, x):
return np.sqrt(2.0/np.pi)*np.exp(-x*x/2.0)
def _logpdf(self, x):
return 0.5 * np.log(2.0/np.pi) - x*x/2.0
def _cdf(self, x):
return _norm_cdf(x)*2-1.0
def _ppf(self, q):
return sc.ndtri((1+q)/2.0)
def _stats(self):
return (np.sqrt(2.0/np.pi), 1-2.0/np.pi, np.sqrt(2)*(4-np.pi)/(np.pi-2)**1.5,
8*(np.pi-3)/(np.pi-2)**2)
def _entropy(self):
return 0.5*np.log(np.pi/2.0)+0.5
halfnorm = halfnorm_gen(a=0.0, name='halfnorm')
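# A small consistency sketch (not part of the upstream module): `halfnorm`
# is `chi` with one degree of freedom, as noted in the docstring above.
#
#     >>> import numpy as np
#     >>> from scipy.stats import chi, halfnorm
#     >>> x = np.linspace(0.1, 3.0, 5)
#     >>> np.allclose(halfnorm.pdf(x), chi.pdf(x, 1))
#     True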
class hypsecant_gen(rv_continuous):
"""A hyperbolic secant continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `hypsecant` is::
hypsecant.pdf(x) = 1/pi * sech(x)
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
return 1.0/(np.pi*np.cosh(x))
def _cdf(self, x):
return 2.0/np.pi*np.arctan(np.exp(x))
def _ppf(self, q):
return np.log(np.tan(np.pi*q/2.0))
def _stats(self):
return 0, np.pi*np.pi/4, 0, 2
def _entropy(self):
return np.log(2*np.pi)
hypsecant = hypsecant_gen(name='hypsecant')
class gausshyper_gen(rv_continuous):
"""A Gauss hypergeometric continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gausshyper` is::
gausshyper.pdf(x, a, b, c, z) =
C * x**(a-1) * (1-x)**(b-1) * (1+z*x)**(-c)
for ``0 <= x <= 1``, ``a > 0``, ``b > 0``, and
``C = 1 / (B(a, b) F[2, 1](c, a; a+b; -z))``
`gausshyper` takes ``a``, ``b``, ``c`` and ``z`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, b, c, z):
return (a > 0) & (b > 0) & (c == c) & (z == z)
def _pdf(self, x, a, b, c, z):
Cinv = sc.gamma(a)*sc.gamma(b)/sc.gamma(a+b)*sc.hyp2f1(c, a, a+b, -z)
return 1.0/Cinv * x**(a-1.0) * (1.0-x)**(b-1.0) / (1.0+z*x)**c
def _munp(self, n, a, b, c, z):
fac = sc.beta(n+a, b) / sc.beta(a, b)
num = sc.hyp2f1(c, a+n, a+b+n, -z)
den = sc.hyp2f1(c, a, a+b, -z)
return fac*num / den
gausshyper = gausshyper_gen(a=0.0, b=1.0, name='gausshyper')
class invgamma_gen(rv_continuous):
"""An inverted gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invgamma` is::
invgamma.pdf(x, a) = x**(-a-1) / gamma(a) * exp(-1/x)
for x > 0, a > 0.
`invgamma` takes ``a`` as a shape parameter.
`invgamma` is a special case of `gengamma` with ``c == -1``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, a):
return np.exp(self._logpdf(x, a))
def _logpdf(self, x, a):
return -(a+1) * np.log(x) - sc.gammaln(a) - 1.0/x
def _cdf(self, x, a):
return sc.gammaincc(a, 1.0 / x)
def _ppf(self, q, a):
return 1.0 / sc.gammainccinv(a, q)
def _sf(self, x, a):
return sc.gammainc(a, 1.0 / x)
def _isf(self, q, a):
return 1.0 / sc.gammaincinv(a, q)
def _stats(self, a, moments='mvsk'):
m1 = _lazywhere(a > 1, (a,), lambda x: 1. / (x - 1.), np.inf)
m2 = _lazywhere(a > 2, (a,), lambda x: 1. / (x - 1.)**2 / (x - 2.),
np.inf)
g1, g2 = None, None
if 's' in moments:
g1 = _lazywhere(
a > 3, (a,),
lambda x: 4. * np.sqrt(x - 2.) / (x - 3.), np.nan)
if 'k' in moments:
g2 = _lazywhere(
a > 4, (a,),
lambda x: 6. * (5. * x - 11.) / (x - 3.) / (x - 4.), np.nan)
return m1, m2, g1, g2
def _entropy(self, a):
return a - (a+1.0) * sc.psi(a) + sc.gammaln(a)
invgamma = invgamma_gen(a=0.0, name='invgamma')
# scale is gamma from DATAPLOT and B from Regress
class invgauss_gen(rv_continuous):
"""An inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invgauss` is::
invgauss.pdf(x, mu) = 1 / sqrt(2*pi*x**3) * exp(-(x-mu)**2/(2*x*mu**2))
for ``x > 0``.
`invgauss` takes ``mu`` as a shape parameter.
%(after_notes)s
When `mu` is too small, evaluating the cumulative distribution function will be
inaccurate due to ``cdf(mu -> 0) = inf * 0``.
NaNs are returned for ``mu <= 0.0028``.
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, mu):
return self._random_state.wald(mu, 1.0, size=self._size)
def _pdf(self, x, mu):
return 1.0/np.sqrt(2*np.pi*x**3.0)*np.exp(-1.0/(2*x)*((x-mu)/mu)**2)
def _logpdf(self, x, mu):
return -0.5*np.log(2*np.pi) - 1.5*np.log(x) - ((x-mu)/mu)**2/(2*x)
def _cdf(self, x, mu):
fac = np.sqrt(1.0/x)
# Numerical accuracy for small `mu` is bad. See #869.
C1 = _norm_cdf(fac*(x-mu)/mu)
        # np.exp(1.0/mu) appears twice on purpose: the product is the
        # reflection coefficient exp(2.0/mu) of the inverse Gaussian CDF.
        C1 += np.exp(1.0/mu) * _norm_cdf(-fac*(x+mu)/mu) * np.exp(1.0/mu)
return C1
def _stats(self, mu):
return mu, mu**3.0, 3*np.sqrt(mu), 15*mu
invgauss = invgauss_gen(a=0.0, name='invgauss')
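# A minimal doctest-style sketch (not part of the upstream module): the shape
# parameter ``mu`` is also the mean of the distribution, per `_stats` above.
#
#     >>> import numpy as np
#     >>> from scipy.stats import invgauss
#     >>> np.isclose(invgauss(0.5).mean(), 0.5)
#     True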
class invweibull_gen(rv_continuous):
"""An inverted Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invweibull` is::
invweibull.pdf(x, c) = c * x**(-c-1) * exp(-x**(-c))
for ``x > 0``, ``c > 0``.
`invweibull` takes ``c`` as a shape parameter.
%(after_notes)s
References
----------
F.R.S. de Gusmao, E.M.M Ortega and G.M. Cordeiro, "The generalized inverse
Weibull distribution", Stat. Papers, vol. 52, pp. 591-619, 2011.
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, c):
xc1 = np.power(x, -c - 1.0)
xc2 = np.power(x, -c)
xc2 = np.exp(-xc2)
return c * xc1 * xc2
def _cdf(self, x, c):
xc1 = np.power(x, -c)
return np.exp(-xc1)
def _ppf(self, q, c):
return np.power(-np.log(q), -1.0/c)
def _munp(self, n, c):
return sc.gamma(1 - n / c)
def _entropy(self, c):
return 1+_EULER + _EULER / c - np.log(c)
invweibull = invweibull_gen(a=0, name='invweibull')
class johnsonsb_gen(rv_continuous):
"""A Johnson SB continuous random variable.
%(before_notes)s
See Also
--------
johnsonsu
Notes
-----
The probability density function for `johnsonsb` is::
johnsonsb.pdf(x, a, b) = b / (x*(1-x)) * phi(a + b * log(x/(1-x)))
for ``0 < x < 1`` and ``a, b > 0``, and ``phi`` is the normal pdf.
`johnsonsb` takes ``a`` and ``b`` as shape parameters.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _argcheck(self, a, b):
return (b > 0) & (a == a)
def _pdf(self, x, a, b):
trm = _norm_pdf(a + b*np.log(x/(1.0-x)))
return b*1.0/(x*(1-x))*trm
def _cdf(self, x, a, b):
return _norm_cdf(a + b*np.log(x/(1.0-x)))
def _ppf(self, q, a, b):
return 1.0 / (1 + np.exp(-1.0 / b * (_norm_ppf(q) - a)))
johnsonsb = johnsonsb_gen(a=0.0, b=1.0, name='johnsonsb')
class johnsonsu_gen(rv_continuous):
"""A Johnson SU continuous random variable.
%(before_notes)s
See Also
--------
johnsonsb
Notes
-----
The probability density function for `johnsonsu` is::
johnsonsu.pdf(x, a, b) = b / sqrt(x**2 + 1) *
phi(a + b * log(x + sqrt(x**2 + 1)))
    for all real ``x``, any real ``a``, and ``b > 0``; ``phi`` is the
    normal pdf.
`johnsonsu` takes ``a`` and ``b`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, b):
return (b > 0) & (a == a)
def _pdf(self, x, a, b):
x2 = x*x
trm = _norm_pdf(a + b * np.log(x + np.sqrt(x2+1)))
return b*1.0/np.sqrt(x2+1.0)*trm
def _cdf(self, x, a, b):
return _norm_cdf(a + b * np.log(x + np.sqrt(x*x + 1)))
def _ppf(self, q, a, b):
return np.sinh((_norm_ppf(q) - a) / b)
johnsonsu = johnsonsu_gen(name='johnsonsu')
class laplace_gen(rv_continuous):
"""A Laplace continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `laplace` is::
laplace.pdf(x) = 1/2 * exp(-abs(x))
%(after_notes)s
%(example)s
"""
def _rvs(self):
return self._random_state.laplace(0, 1, size=self._size)
def _pdf(self, x):
return 0.5*np.exp(-abs(x))
def _cdf(self, x):
return np.where(x > 0, 1.0-0.5*np.exp(-x), 0.5*np.exp(x))
def _ppf(self, q):
return np.where(q > 0.5, -np.log(2*(1-q)), np.log(2*q))
def _stats(self):
return 0, 2, 0, 3
def _entropy(self):
return np.log(2)+1
laplace = laplace_gen(name='laplace')
class levy_gen(rv_continuous):
"""A Levy continuous random variable.
%(before_notes)s
See Also
--------
levy_stable, levy_l
Notes
-----
The probability density function for `levy` is::
levy.pdf(x) = 1 / (x * sqrt(2*pi*x)) * exp(-1/(2*x))
for ``x > 0``.
This is the same as the Levy-stable distribution with a=1/2 and b=1.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x):
return 1 / np.sqrt(2*np.pi*x) / x * np.exp(-1/(2*x))
def _cdf(self, x):
# Equivalent to 2*norm.sf(np.sqrt(1/x))
return sc.erfc(np.sqrt(0.5 / x))
def _ppf(self, q):
# Equivalent to 1.0/(norm.isf(q/2)**2) or 0.5/(erfcinv(q)**2)
val = -sc.ndtri(q/2)
return 1.0 / (val * val)
def _stats(self):
return np.inf, np.inf, np.nan, np.nan
levy = levy_gen(a=0.0, name="levy")
class levy_l_gen(rv_continuous):
"""A left-skewed Levy continuous random variable.
%(before_notes)s
See Also
--------
levy, levy_stable
Notes
-----
The probability density function for `levy_l` is::
levy_l.pdf(x) = 1 / (abs(x) * sqrt(2*pi*abs(x))) * exp(-1/(2*abs(x)))
for ``x < 0``.
This is the same as the Levy-stable distribution with a=1/2 and b=-1.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x):
ax = abs(x)
return 1/np.sqrt(2*np.pi*ax)/ax*np.exp(-1/(2*ax))
def _cdf(self, x):
ax = abs(x)
return 2 * _norm_cdf(1 / np.sqrt(ax)) - 1
def _ppf(self, q):
val = _norm_ppf((q + 1.0) / 2)
return -1.0 / (val * val)
def _stats(self):
return np.inf, np.inf, np.nan, np.nan
levy_l = levy_l_gen(b=0.0, name="levy_l")
class levy_stable_gen(rv_continuous):
"""A Levy-stable continuous random variable.
%(before_notes)s
See Also
--------
levy, levy_l
Notes
-----
Levy-stable distribution (only random variates available -- ignore other
docs)
%(after_notes)s
%(example)s
"""
def _rvs(self, alpha, beta):
def alpha1func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
return (2/np.pi*(np.pi/2 + bTH)*tanTH -
beta*np.log((np.pi/2*W*cosTH)/(np.pi/2 + bTH)))
def beta0func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
return (W/(cosTH/np.tan(aTH) + np.sin(TH)) *
((np.cos(aTH) + np.sin(aTH)*tanTH)/W)**(1.0/alpha))
def otherwise(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
# alpha is not 1 and beta is not 0
val0 = beta*np.tan(np.pi*alpha/2)
th0 = np.arctan(val0)/alpha
val3 = W/(cosTH/np.tan(alpha*(th0 + TH)) + np.sin(TH))
res3 = val3*((np.cos(aTH) + np.sin(aTH)*tanTH -
val0*(np.sin(aTH) - np.cos(aTH)*tanTH))/W)**(1.0/alpha)
return res3
def alphanot1func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
res = _lazywhere(beta == 0,
(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W),
beta0func, f2=otherwise)
return res
sz = self._size
alpha = broadcast_to(alpha, sz)
beta = broadcast_to(beta, sz)
TH = uniform.rvs(loc=-np.pi/2.0, scale=np.pi, size=sz,
random_state=self._random_state)
W = expon.rvs(size=sz, random_state=self._random_state)
aTH = alpha*TH
bTH = beta*TH
cosTH = np.cos(TH)
tanTH = np.tan(TH)
res = _lazywhere(alpha == 1, (alpha, beta, TH, aTH, bTH, cosTH, tanTH, W),
alpha1func, f2=alphanot1func)
return res
def _argcheck(self, alpha, beta):
return (alpha > 0) & (alpha <= 2) & (beta <= 1) & (beta >= -1)
def _pdf(self, x, alpha, beta):
raise NotImplementedError
levy_stable = levy_stable_gen(name='levy_stable')
class logistic_gen(rv_continuous):
"""A logistic (or Sech-squared) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `logistic` is::
logistic.pdf(x) = exp(-x) / (1+exp(-x))**2
`logistic` is a special case of `genlogistic` with ``c == 1``.
%(after_notes)s
%(example)s
"""
def _rvs(self):
return self._random_state.logistic(size=self._size)
def _pdf(self, x):
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return -x - 2. * sc.log1p(np.exp(-x))
def _cdf(self, x):
return sc.expit(x)
def _ppf(self, q):
return sc.logit(q)
def _sf(self, x):
return sc.expit(-x)
def _isf(self, q):
return -sc.logit(q)
def _stats(self):
return 0, np.pi*np.pi/3.0, 0, 6.0/5.0
def _entropy(self):
# http://en.wikipedia.org/wiki/Logistic_distribution
return 2.0
logistic = logistic_gen(name='logistic')
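# A quick consistency sketch (not part of the upstream module): `logistic`
# is `genlogistic` with c = 1, as noted in the docstring above.
#
#     >>> import numpy as np
#     >>> from scipy.stats import genlogistic, logistic
#     >>> x = np.linspace(-4, 4, 9)
#     >>> np.allclose(logistic.pdf(x), genlogistic.pdf(x, 1.0))
#     True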
class loggamma_gen(rv_continuous):
"""A log gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `loggamma` is::
loggamma.pdf(x, c) = exp(c*x-exp(x)) / gamma(c)
    for all real ``x`` and ``c > 0``.
`loggamma` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, c):
return np.log(self._random_state.gamma(c, size=self._size))
def _pdf(self, x, c):
return np.exp(c*x-np.exp(x)-sc.gammaln(c))
def _cdf(self, x, c):
return sc.gammainc(c, np.exp(x))
def _ppf(self, q, c):
return np.log(sc.gammaincinv(c, q))
def _stats(self, c):
# See, for example, "A Statistical Study of Log-Gamma Distribution", by
# Ping Shing Chan (thesis, McMaster University, 1993).
mean = sc.digamma(c)
var = sc.polygamma(1, c)
skewness = sc.polygamma(2, c) / np.power(var, 1.5)
excess_kurtosis = sc.polygamma(3, c) / (var*var)
return mean, var, skewness, excess_kurtosis
loggamma = loggamma_gen(name='loggamma')
class loglaplace_gen(rv_continuous):
"""A log-Laplace continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `loglaplace` is::
loglaplace.pdf(x, c) = c / 2 * x**(c-1), for 0 < x < 1
= c / 2 * x**(-c-1), for x >= 1
for ``c > 0``.
`loglaplace` takes ``c`` as a shape parameter.
%(after_notes)s
References
----------
T.J. Kozubowski and K. Podgorski, "A log-Laplace growth rate model",
The Mathematical Scientist, vol. 28, pp. 49-60, 2003.
%(example)s
"""
def _pdf(self, x, c):
cd2 = c/2.0
c = np.where(x < 1, c, -c)
return cd2*x**(c-1)
def _cdf(self, x, c):
return np.where(x < 1, 0.5*x**c, 1-0.5*x**(-c))
def _ppf(self, q, c):
return np.where(q < 0.5, (2.0*q)**(1.0/c), (2*(1.0-q))**(-1.0/c))
def _munp(self, n, c):
return c**2 / (c**2 - n**2)
def _entropy(self, c):
return np.log(2.0/c) + 1.0
loglaplace = loglaplace_gen(a=0.0, name='loglaplace')
def _lognorm_logpdf(x, s):
return _lazywhere(x != 0, (x, s),
lambda x, s: -np.log(x)**2 / (2*s**2) - np.log(s*x*np.sqrt(2*np.pi)),
-np.inf)
class lognorm_gen(rv_continuous):
"""A lognormal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `lognorm` is::
lognorm.pdf(x, s) = 1 / (s*x*sqrt(2*pi)) * exp(-1/2*(log(x)/s)**2)
for ``x > 0``, ``s > 0``.
`lognorm` takes ``s`` as a shape parameter.
%(after_notes)s
A common parametrization for a lognormal random variable ``Y`` is in
terms of the mean, ``mu``, and standard deviation, ``sigma``, of the
unique normally distributed random variable ``X`` such that exp(X) = Y.
This parametrization corresponds to setting ``s = sigma`` and ``scale =
exp(mu)``.
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, s):
return np.exp(s * self._random_state.standard_normal(self._size))
def _pdf(self, x, s):
return np.exp(self._logpdf(x, s))
def _logpdf(self, x, s):
return _lognorm_logpdf(x, s)
def _cdf(self, x, s):
return _norm_cdf(np.log(x) / s)
def _logcdf(self, x, s):
return _norm_logcdf(np.log(x) / s)
def _ppf(self, q, s):
return np.exp(s * _norm_ppf(q))
def _sf(self, x, s):
return _norm_sf(np.log(x) / s)
def _logsf(self, x, s):
return _norm_logsf(np.log(x) / s)
def _stats(self, s):
p = np.exp(s*s)
mu = np.sqrt(p)
mu2 = p*(p-1)
g1 = np.sqrt((p-1))*(2+p)
g2 = np.polyval([1, 2, 3, 0, -6.0], p)
return mu, mu2, g1, g2
def _entropy(self, s):
return 0.5 * (1 + np.log(2*np.pi) + 2 * np.log(s))
lognorm = lognorm_gen(a=0.0, name='lognorm')
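# A hedged sketch (not part of the upstream module) of the (mu, sigma)
# parametrization described in the docstring above:
#
#     >>> import numpy as np
#     >>> from scipy.stats import lognorm
#     >>> mu, sigma = 0.3, 0.8
#     >>> rv = lognorm(s=sigma, scale=np.exp(mu))
#     >>> np.isclose(rv.median(), np.exp(mu))   # median of exp(X) is exp(mu)
#     True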
class gilbrat_gen(rv_continuous):
"""A Gilbrat continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gilbrat` is::
gilbrat.pdf(x) = 1/(x*sqrt(2*pi)) * exp(-1/2*(log(x))**2)
`gilbrat` is a special case of `lognorm` with ``s = 1``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self):
return np.exp(self._random_state.standard_normal(self._size))
def _pdf(self, x):
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return _lognorm_logpdf(x, 1.0)
def _cdf(self, x):
return _norm_cdf(np.log(x))
def _ppf(self, q):
return np.exp(_norm_ppf(q))
def _stats(self):
p = np.e
mu = np.sqrt(p)
mu2 = p * (p - 1)
g1 = np.sqrt((p - 1)) * (2 + p)
g2 = np.polyval([1, 2, 3, 0, -6.0], p)
return mu, mu2, g1, g2
def _entropy(self):
return 0.5 * np.log(2 * np.pi) + 0.5
gilbrat = gilbrat_gen(a=0.0, name='gilbrat')
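# A small consistency sketch (not part of the upstream module): `gilbrat`
# is `lognorm` with s = 1, as noted in the docstring above.
#
#     >>> import numpy as np
#     >>> from scipy.stats import gilbrat, lognorm
#     >>> x = np.linspace(0.1, 5.0, 9)
#     >>> np.allclose(gilbrat.pdf(x), lognorm.pdf(x, 1.0))
#     True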
class maxwell_gen(rv_continuous):
"""A Maxwell continuous random variable.
%(before_notes)s
Notes
-----
A special case of a `chi` distribution, with ``df = 3``, ``loc = 0.0``,
and given ``scale = a``, where ``a`` is the parameter used in the
Mathworld description [1]_.
The probability density function for `maxwell` is::
maxwell.pdf(x) = sqrt(2/pi)x**2 * exp(-x**2/2)
for ``x > 0``.
%(after_notes)s
References
----------
.. [1] http://mathworld.wolfram.com/MaxwellDistribution.html
%(example)s
"""
def _rvs(self):
return chi.rvs(3.0, size=self._size, random_state=self._random_state)
def _pdf(self, x):
return np.sqrt(2.0/np.pi)*x*x*np.exp(-x*x/2.0)
def _cdf(self, x):
return sc.gammainc(1.5, x*x/2.0)
def _ppf(self, q):
return np.sqrt(2*sc.gammaincinv(1.5, q))
def _stats(self):
val = 3*np.pi-8
return (2*np.sqrt(2.0/np.pi),
3-8/np.pi,
np.sqrt(2)*(32-10*np.pi)/val**1.5,
(-12*np.pi*np.pi + 160*np.pi - 384) / val**2.0)
def _entropy(self):
return _EULER + 0.5*np.log(2*np.pi)-0.5
maxwell = maxwell_gen(a=0.0, name='maxwell')
class mielke_gen(rv_continuous):
"""A Mielke's Beta-Kappa continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `mielke` is::
mielke.pdf(x, k, s) = k * x**(k-1) / (1+x**s)**(1+k/s)
for ``x > 0``.
`mielke` takes ``k`` and ``s`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, k, s):
return k*x**(k-1.0) / (1.0+x**s)**(1.0+k*1.0/s)
def _cdf(self, x, k, s):
return x**k / (1.0+x**s)**(k*1.0/s)
def _ppf(self, q, k, s):
qsk = pow(q, s*1.0/k)
return pow(qsk/(1.0-qsk), 1.0/s)
mielke = mielke_gen(a=0.0, name='mielke')
class kappa4_gen(rv_continuous):
"""Kappa 4 parameter distribution.
%(before_notes)s
Notes
-----
The probability density function for kappa4 is::
kappa4.pdf(x, h, k) = (1.0 - k*x)**(1.0/k - 1.0)*
(1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h-1)
if ``h`` and ``k`` are not equal to 0.
If ``h`` or ``k`` are zero then the pdf can be simplified:
h = 0 and k != 0::
kappa4.pdf(x, h, k) = (1.0 - k*x)**(1.0/k - 1.0)*
exp(-(1.0 - k*x)**(1.0/k))
h != 0 and k = 0::
kappa4.pdf(x, h, k) = exp(-x)*(1.0 - h*exp(-x))**(1.0/h - 1.0)
h = 0 and k = 0::
kappa4.pdf(x, h, k) = exp(-x)*exp(-exp(-x))
kappa4 takes ``h`` and ``k`` as shape parameters.
    The kappa4 distribution reduces to other distributions for certain
    combinations of ``h`` and ``k``:
+------+-------------+----------------+------------------+
| h | k=0.0 | k=1.0 | -inf<=k<=inf |
+======+=============+================+==================+
| -1.0 | Logistic | | Generalized |
| | | | Logistic(1) |
| | | | |
| | logistic(x) | | |
+------+-------------+----------------+------------------+
| 0.0 | Gumbel | Reverse | Generalized |
| | | Exponential(2) | Extreme Value |
| | | | |
| | gumbel_r(x) | | genextreme(x, k) |
+------+-------------+----------------+------------------+
| 1.0 | Exponential | Uniform | Generalized |
| | | | Pareto |
| | | | |
| | expon(x) | uniform(x) | genpareto(x, -k) |
+------+-------------+----------------+------------------+
(1) There are at least five generalized logistic distributions.
Four are described here:
https://en.wikipedia.org/wiki/Generalized_logistic_distribution
The "fifth" one is the one kappa4 should match which currently
isn't implemented in scipy:
https://en.wikipedia.org/wiki/Talk:Generalized_logistic_distribution
http://www.mathwave.com/help/easyfit/html/analyses/distributions/gen_logistic.html
(2) This distribution is currently not in scipy.
References
----------
J.C. Finney, "Optimization of a Skewed Logistic Distribution With Respect
to the Kolmogorov-Smirnov Test", A Dissertation Submitted to the Graduate
Faculty of the Louisiana State University and Agricultural and Mechanical
College, (August, 2004),
http://etd.lsu.edu/docs/available/etd-05182004-144851/unrestricted/Finney_dis.pdf
J.R.M. Hosking, "The four-parameter kappa distribution". IBM J. Res.
    Develop. 38 (3), 251-258 (1994).
B. Kumphon, A. Kaew-Man, P. Seenoi, "A Rainfall Distribution for the Lampao
Site in the Chi River Basin, Thailand", Journal of Water Resource and
Protection, vol. 4, 866-869, (2012).
http://file.scirp.org/pdf/JWARP20121000009_14676002.pdf
C. Winchester, "On Estimation of the Four-Parameter Kappa Distribution", A
Thesis Submitted to Dalhousie University, Halifax, Nova Scotia, (March
2000).
http://www.nlc-bnc.ca/obj/s4/f2/dsk2/ftp01/MQ57336.pdf
%(after_notes)s
%(example)s
"""
def _argcheck(self, h, k):
condlist = [np.logical_and(h > 0, k > 0),
np.logical_and(h > 0, k == 0),
np.logical_and(h > 0, k < 0),
np.logical_and(h <= 0, k > 0),
np.logical_and(h <= 0, k == 0),
np.logical_and(h <= 0, k < 0)]
def f0(h, k):
return (1.0 - h**(-k))/k
def f1(h, k):
return np.log(h)
def f3(h, k):
a = np.empty(np.shape(h))
a[:] = -np.inf
return a
def f5(h, k):
return 1.0/k
self.a = _lazyselect(condlist,
[f0, f1, f0, f3, f3, f5],
[h, k],
default=np.nan)
def f0(h, k):
return 1.0/k
def f1(h, k):
a = np.empty(np.shape(h))
a[:] = np.inf
return a
self.b = _lazyselect(condlist,
[f0, f1, f1, f0, f1, f1],
[h, k],
default=np.nan)
return h == h
def _pdf(self, x, h, k):
return np.exp(self._logpdf(x, h, k))
def _logpdf(self, x, h, k):
condlist = [np.logical_and(h != 0, k != 0),
np.logical_and(h == 0, k != 0),
np.logical_and(h != 0, k == 0),
np.logical_and(h == 0, k == 0)]
def f0(x, h, k):
'''pdf = (1.0 - k*x)**(1.0/k - 1.0)*(
1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h-1.0)
logpdf = ...
'''
return (sc.xlog1py(1.0/k - 1.0, -k*x) +
sc.xlog1py(1.0/h - 1.0, -h*(1.0 - k*x)**(1.0/k)))
def f1(x, h, k):
'''pdf = (1.0 - k*x)**(1.0/k - 1.0)*np.exp(-(
1.0 - k*x)**(1.0/k))
logpdf = ...
'''
return sc.xlog1py(1.0/k - 1.0, -k*x) - (1.0 - k*x)**(1.0/k)
def f2(x, h, k):
'''pdf = np.exp(-x)*(1.0 - h*np.exp(-x))**(1.0/h - 1.0)
logpdf = ...
'''
return -x + sc.xlog1py(1.0/h - 1.0, -h*np.exp(-x))
def f3(x, h, k):
'''pdf = np.exp(-x-np.exp(-x))
logpdf = ...
'''
return -x - np.exp(-x)
return _lazyselect(condlist,
[f0, f1, f2, f3],
[x, h, k],
default=np.nan)
def _cdf(self, x, h, k):
return np.exp(self._logcdf(x, h, k))
def _logcdf(self, x, h, k):
condlist = [np.logical_and(h != 0, k != 0),
np.logical_and(h == 0, k != 0),
np.logical_and(h != 0, k == 0),
np.logical_and(h == 0, k == 0)]
def f0(x, h, k):
'''cdf = (1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h)
logcdf = ...
'''
return (1.0/h)*sc.log1p(-h*(1.0 - k*x)**(1.0/k))
def f1(x, h, k):
'''cdf = np.exp(-(1.0 - k*x)**(1.0/k))
logcdf = ...
'''
return -(1.0 - k*x)**(1.0/k)
def f2(x, h, k):
'''cdf = (1.0 - h*np.exp(-x))**(1.0/h)
logcdf = ...
'''
return (1.0/h)*sc.log1p(-h*np.exp(-x))
def f3(x, h, k):
'''cdf = np.exp(-np.exp(-x))
logcdf = ...
'''
return -np.exp(-x)
return _lazyselect(condlist,
[f0, f1, f2, f3],
[x, h, k],
default=np.nan)
def _ppf(self, q, h, k):
condlist = [np.logical_and(h != 0, k != 0),
np.logical_and(h == 0, k != 0),
np.logical_and(h != 0, k == 0),
np.logical_and(h == 0, k == 0)]
def f0(q, h, k):
return 1.0/k*(1.0 - ((1.0 - (q**h))/h)**k)
def f1(q, h, k):
return 1.0/k*(1.0 - (-np.log(q))**k)
def f2(q, h, k):
'''ppf = -np.log((1.0 - (q**h))/h)
'''
return -sc.log1p(-(q**h)) + np.log(h)
def f3(q, h, k):
return -np.log(-np.log(q))
return _lazyselect(condlist,
[f0, f1, f2, f3],
[q, h, k],
default=np.nan)
def _stats(self, h, k):
if h >= 0 and k >= 0:
maxr = 5
elif h < 0 and k >= 0:
maxr = int(-1.0/h*k)
elif k < 0:
maxr = int(-1.0/k)
else:
maxr = 5
outputs = [None if r < maxr else np.nan for r in range(1, 5)]
return outputs[:]
kappa4 = kappa4_gen(name='kappa4')
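# A minimal doctest-style sketch (not part of the upstream module) checking
# one row of the special-case table in the docstring above: h = 0, k = 0
# gives `gumbel_r`.
#
#     >>> import numpy as np
#     >>> from scipy.stats import gumbel_r, kappa4
#     >>> x = np.linspace(-2, 4, 7)
#     >>> np.allclose(kappa4.pdf(x, 0.0, 0.0), gumbel_r.pdf(x))
#     True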
class kappa3_gen(rv_continuous):
"""Kappa 3 parameter distribution.
%(before_notes)s
Notes
-----
    The probability density function for `kappa3` is::
kappa3.pdf(x, a) =
a*[a + x**a]**(-(a + 1)/a), for ``x > 0``
0.0, for ``x <= 0``
`kappa3` takes ``a`` as a shape parameter and ``a > 0``.
References
----------
P.W. Mielke and E.S. Johnson, "Three-Parameter Kappa Distribution Maximum
Likelihood and Likelihood Ratio Tests", Methods in Weather Research,
701-707, (September, 1973),
http://docs.lib.noaa.gov/rescue/mwr/101/mwr-101-09-0701.pdf
B. Kumphon, "Maximum Entropy and Maximum Likelihood Estimation for the
Three-Parameter Kappa Distribution", Open Journal of Statistics, vol 2,
415-419 (2012)
http://file.scirp.org/pdf/OJS20120400011_95789012.pdf
%(after_notes)s
%(example)s
"""
def _argcheck(self, a):
return a > 0
def _pdf(self, x, a):
return a*(a + x**a)**(-1.0/a-1)
def _cdf(self, x, a):
return x*(a + x**a)**(-1.0/a)
def _ppf(self, q, a):
return (a/(q**-a - 1.0))**(1.0/a)
def _stats(self, a):
outputs = [None if i < a else np.nan for i in range(1, 5)]
return outputs[:]
kappa3 = kappa3_gen(a=0.0, name='kappa3')
class nakagami_gen(rv_continuous):
"""A Nakagami continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `nakagami` is::
nakagami.pdf(x, nu) = 2 * nu**nu / gamma(nu) *
x**(2*nu-1) * exp(-nu*x**2)
for ``x > 0``, ``nu > 0``.
`nakagami` takes ``nu`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, nu):
return 2*nu**nu/sc.gamma(nu)*(x**(2*nu-1.0))*np.exp(-nu*x*x)
def _cdf(self, x, nu):
return sc.gammainc(nu, nu*x*x)
def _ppf(self, q, nu):
return np.sqrt(1.0/nu*sc.gammaincinv(nu, q))
def _stats(self, nu):
mu = sc.gamma(nu+0.5)/sc.gamma(nu)/np.sqrt(nu)
mu2 = 1.0-mu*mu
g1 = mu * (1 - 4*nu*mu2) / 2.0 / nu / np.power(mu2, 1.5)
g2 = -6*mu**4*nu + (8*nu-2)*mu**2-2*nu + 1
g2 /= nu*mu2**2.0
return mu, mu2, g1, g2
nakagami = nakagami_gen(a=0.0, name="nakagami")
class ncx2_gen(rv_continuous):
"""A non-central chi-squared continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `ncx2` is::
ncx2.pdf(x, df, nc) = exp(-(nc+x)/2) * 1/2 * (x/nc)**((df-2)/4)
* I[(df-2)/2](sqrt(nc*x))
for ``x > 0``.
`ncx2` takes ``df`` and ``nc`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, df, nc):
return self._random_state.noncentral_chisquare(df, nc, self._size)
def _logpdf(self, x, df, nc):
return _ncx2_log_pdf(x, df, nc)
def _pdf(self, x, df, nc):
return _ncx2_pdf(x, df, nc)
def _cdf(self, x, df, nc):
return _ncx2_cdf(x, df, nc)
def _ppf(self, q, df, nc):
return sc.chndtrix(q, df, nc)
def _stats(self, df, nc):
val = df + 2.0*nc
return (df + nc,
2*val,
np.sqrt(8)*(val+nc)/val**1.5,
12.0*(val+2*nc)/val**2.0)
ncx2 = ncx2_gen(a=0.0, name='ncx2')
class ncf_gen(rv_continuous):
"""A non-central F distribution continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `ncf` is::
        ncf.pdf(x, df1, df2, nc) = exp(-nc/2 + nc*df1*x/(2*(df1*x+df2))) *
                    df1**(df1/2) * df2**(df2/2) * x**(df1/2-1) *
                    (df2+df1*x)**(-(df1+df2)/2) *
                    gamma(df1/2)*gamma(1+df2/2) *
                    L_{df2/2}^{df1/2-1}(-nc*df1*x/(2*(df1*x+df2))) /
                    (B(df1/2, df2/2) * gamma((df1+df2)/2))
    where ``L_{n}^{k}`` is a generalized (associated) Laguerre polynomial.
for ``df1, df2, nc > 0``.
`ncf` takes ``df1``, ``df2`` and ``nc`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, dfn, dfd, nc):
return self._random_state.noncentral_f(dfn, dfd, nc, self._size)
def _pdf_skip(self, x, dfn, dfd, nc):
n1, n2 = dfn, dfd
term = -nc/2+nc*n1*x/(2*(n2+n1*x)) + sc.gammaln(n1/2.)+sc.gammaln(1+n2/2.)
term -= sc.gammaln((n1+n2)/2.0)
Px = np.exp(term)
Px *= n1**(n1/2) * n2**(n2/2) * x**(n1/2-1)
Px *= (n2+n1*x)**(-(n1+n2)/2)
Px *= sc.assoc_laguerre(-nc*n1*x/(2.0*(n2+n1*x)), n2/2, n1/2-1)
Px /= sc.beta(n1/2, n2/2)
        # This function does not have a return. Drop it for now; the generic
        # function seems to work OK.
def _cdf(self, x, dfn, dfd, nc):
return sc.ncfdtr(dfn, dfd, nc, x)
def _ppf(self, q, dfn, dfd, nc):
return sc.ncfdtri(dfn, dfd, nc, q)
def _munp(self, n, dfn, dfd, nc):
val = (dfn * 1.0/dfd)**n
term = sc.gammaln(n+0.5*dfn) + sc.gammaln(0.5*dfd-n) - sc.gammaln(dfd*0.5)
val *= np.exp(-nc / 2.0+term)
val *= sc.hyp1f1(n+0.5*dfn, 0.5*dfn, 0.5*nc)
return val
def _stats(self, dfn, dfd, nc):
mu = np.where(dfd <= 2, np.inf, dfd / (dfd-2.0)*(1+nc*1.0/dfn))
mu2 = np.where(dfd <= 4, np.inf, 2*(dfd*1.0/dfn)**2.0 *
((dfn+nc/2.0)**2.0 + (dfn+nc)*(dfd-2.0)) /
((dfd-2.0)**2.0 * (dfd-4.0)))
return mu, mu2, None, None
ncf = ncf_gen(a=0.0, name='ncf')
class t_gen(rv_continuous):
"""A Student's T continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `t` is::
gamma((df+1)/2)
t.pdf(x, df) = ---------------------------------------------------
sqrt(pi*df) * gamma(df/2) * (1+x**2/df)**((df+1)/2)
for ``df > 0``.
`t` takes ``df`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, df):
return self._random_state.standard_t(df, size=self._size)
def _pdf(self, x, df):
r = np.asarray(df*1.0)
Px = np.exp(sc.gammaln((r+1)/2)-sc.gammaln(r/2))
Px /= np.sqrt(r*np.pi)*(1+(x**2)/r)**((r+1)/2)
return Px
def _logpdf(self, x, df):
r = df*1.0
lPx = sc.gammaln((r+1)/2)-sc.gammaln(r/2)
lPx -= 0.5*np.log(r*np.pi) + (r+1)/2*np.log(1+(x**2)/r)
return lPx
def _cdf(self, x, df):
return sc.stdtr(df, x)
def _sf(self, x, df):
return sc.stdtr(df, -x)
def _ppf(self, q, df):
return sc.stdtrit(df, q)
def _isf(self, q, df):
return -sc.stdtrit(df, q)
def _stats(self, df):
mu2 = _lazywhere(df > 2, (df,),
lambda df: df / (df-2.0),
np.inf)
g1 = np.where(df > 3, 0.0, np.nan)
g2 = _lazywhere(df > 4, (df,),
lambda df: 6.0 / (df-4.0),
np.nan)
return 0, mu2, g1, g2
t = t_gen(name='t')
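# Illustrative sketch (editor's addition): for large `df` the Student's t cdf
# computed by the `sc.stdtr`-based `_cdf` above approaches the normal cdf.
def _t_normal_limit_example():
    from scipy import stats
    return abs(stats.t.cdf(1.0, df=1e6) - stats.norm.cdf(1.0)) < 1e-5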
class nct_gen(rv_continuous):
"""A non-central Student's T continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `nct` is::
df**(df/2) * gamma(df+1)
nct.pdf(x, df, nc) = ----------------------------------------------------
2**df*exp(nc**2/2) * (df+x**2)**(df/2) * gamma(df/2)
for ``df > 0``.
`nct` takes ``df`` and ``nc`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, df, nc):
return (df > 0) & (nc == nc)
def _rvs(self, df, nc):
sz, rndm = self._size, self._random_state
n = norm.rvs(loc=nc, size=sz, random_state=rndm)
c2 = chi2.rvs(df, size=sz, random_state=rndm)
return n * np.sqrt(df) / np.sqrt(c2)
def _pdf(self, x, df, nc):
n = df*1.0
nc = nc*1.0
x2 = x*x
ncx2 = nc*nc*x2
fac1 = n + x2
trm1 = n/2.*np.log(n) + sc.gammaln(n+1)
trm1 -= n*np.log(2)+nc*nc/2.+(n/2.)*np.log(fac1)+sc.gammaln(n/2.)
Px = np.exp(trm1)
valF = ncx2 / (2*fac1)
trm1 = np.sqrt(2)*nc*x*sc.hyp1f1(n/2+1, 1.5, valF)
trm1 /= np.asarray(fac1*sc.gamma((n+1)/2))
trm2 = sc.hyp1f1((n+1)/2, 0.5, valF)
trm2 /= np.asarray(np.sqrt(fac1)*sc.gamma(n/2+1))
Px *= trm1+trm2
return Px
def _cdf(self, x, df, nc):
return sc.nctdtr(df, nc, x)
def _ppf(self, q, df, nc):
return sc.nctdtrit(df, nc, q)
def _stats(self, df, nc, moments='mv'):
#
# See D. Hogben, R.S. Pinkham, and M.B. Wilk,
# 'The moments of the non-central t-distribution'
# Biometrika 48, p. 465 (1961).
# e.g. http://www.jstor.org/stable/2332772 (gated)
#
mu, mu2, g1, g2 = None, None, None, None
gfac = sc.gamma(df/2.-0.5) / sc.gamma(df/2.)
c11 = np.sqrt(df/2.) * gfac
c20 = df / (df-2.)
c22 = c20 - c11*c11
mu = np.where(df > 1, nc*c11, np.inf)
mu2 = np.where(df > 2, c22*nc*nc + c20, np.inf)
if 's' in moments:
c33t = df * (7.-2.*df) / (df-2.) / (df-3.) + 2.*c11*c11
c31t = 3.*df / (df-2.) / (df-3.)
mu3 = (c33t*nc*nc + c31t) * c11*nc
g1 = np.where(df > 3, mu3 / np.power(mu2, 1.5), np.nan)
#kurtosis
if 'k' in moments:
c44 = df*df / (df-2.) / (df-4.)
c44 -= c11*c11 * 2.*df*(5.-df) / (df-2.) / (df-3.)
c44 -= 3.*c11**4
c42 = df / (df-4.) - c11*c11 * (df-1.) / (df-3.)
c42 *= 6.*df / (df-2.)
c40 = 3.*df*df / (df-2.) / (df-4.)
mu4 = c44 * nc**4 + c42*nc**2 + c40
g2 = np.where(df > 4, mu4/mu2**2 - 3., np.nan)
return mu, mu2, g1, g2
nct = nct_gen(name="nct")
class pareto_gen(rv_continuous):
"""A Pareto continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `pareto` is::
pareto.pdf(x, b) = b / x**(b+1)
for ``x >= 1``, ``b > 0``.
`pareto` takes ``b`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, b):
return b * x**(-b-1)
def _cdf(self, x, b):
return 1 - x**(-b)
def _ppf(self, q, b):
return pow(1-q, -1.0/b)
def _stats(self, b, moments='mv'):
mu, mu2, g1, g2 = None, None, None, None
if 'm' in moments:
mask = b > 1
bt = np.extract(mask, b)
mu = valarray(np.shape(b), value=np.inf)
np.place(mu, mask, bt / (bt-1.0))
if 'v' in moments:
mask = b > 2
bt = np.extract(mask, b)
mu2 = valarray(np.shape(b), value=np.inf)
np.place(mu2, mask, bt / (bt-2.0) / (bt-1.0)**2)
if 's' in moments:
mask = b > 3
bt = np.extract(mask, b)
g1 = valarray(np.shape(b), value=np.nan)
vals = 2 * (bt + 1.0) * np.sqrt(bt - 2.0) / ((bt - 3.0) * np.sqrt(bt))
np.place(g1, mask, vals)
if 'k' in moments:
mask = b > 4
bt = np.extract(mask, b)
g2 = valarray(np.shape(b), value=np.nan)
vals = (6.0*np.polyval([1.0, 1.0, -6, -2], bt) /
np.polyval([1.0, -7.0, 12.0, 0.0], bt))
np.place(g2, mask, vals)
return mu, mu2, g1, g2
def _entropy(self, c):
return 1 + 1.0/c - np.log(c)
pareto = pareto_gen(a=1.0, name="pareto")
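# Illustrative sketch (editor's addition): the masked moment logic in `_stats`
# above yields an infinite mean for b <= 1 and b/(b-1) otherwise.
def _pareto_mean_example():
    import numpy as np
    from scipy import stats
    heavy = float(stats.pareto.stats(0.5, moments='m'))  # b <= 1 -> inf
    light = float(stats.pareto.stats(3.0, moments='m'))  # b > 1  -> b/(b-1)
    return np.isinf(heavy) and np.isclose(light, 1.5)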
class lomax_gen(rv_continuous):
"""A Lomax (Pareto of the second kind) continuous random variable.
%(before_notes)s
Notes
-----
The Lomax distribution is a special case of the Pareto distribution, with
``loc = -1.0``.
The probability density function for `lomax` is::
lomax.pdf(x, c) = c / (1+x)**(c+1)
for ``x >= 0``, ``c > 0``.
`lomax` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
return c*1.0/(1.0+x)**(c+1.0)
def _logpdf(self, x, c):
return np.log(c) - (c+1)*sc.log1p(x)
def _cdf(self, x, c):
return -sc.expm1(-c*sc.log1p(x))
def _sf(self, x, c):
return np.exp(-c*sc.log1p(x))
def _logsf(self, x, c):
return -c*sc.log1p(x)
def _ppf(self, q, c):
return sc.expm1(-sc.log1p(-q)/c)
def _stats(self, c):
mu, mu2, g1, g2 = pareto.stats(c, loc=-1.0, moments='mvsk')
return mu, mu2, g1, g2
def _entropy(self, c):
return 1+1.0/c-np.log(c)
lomax = lomax_gen(a=0.0, name="lomax")
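# Illustrative sketch (editor's addition): the docstring's claim that lomax is
# a shifted Pareto (``loc = -1.0``), verified pointwise on a small grid.
def _lomax_vs_pareto_example():
    import numpy as np
    from scipy import stats
    c, x = 2.5, np.linspace(0.0, 4.0, 9)
    return np.allclose(stats.lomax.pdf(x, c), stats.pareto.pdf(x, c, loc=-1.0))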
class pearson3_gen(rv_continuous):
"""A pearson type III continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `pearson3` is::
pearson3.pdf(x, skew) = abs(beta) / gamma(alpha) *
(beta * (x - zeta))**(alpha - 1) * exp(-beta*(x - zeta))
where::
beta = 2 / (skew * stddev)
alpha = (stddev * beta)**2
zeta = loc - alpha / beta
`pearson3` takes ``skew`` as a shape parameter.
%(after_notes)s
%(example)s
References
----------
R.W. Vogel and D.E. McMartin, "Probability Plot Goodness-of-Fit and
Skewness Estimation Procedures for the Pearson Type 3 Distribution", Water
Resources Research, Vol.27, 3149-3158 (1991).
L.R. Salvosa, "Tables of Pearson's Type III Function", Ann. Math. Statist.,
Vol.1, 191-198 (1930).
"Using Modern Computing Tools to Fit the Pearson Type III Distribution to
Aviation Loads Data", Office of Aviation Research (2003).
"""
def _preprocess(self, x, skew):
# The real 'loc' and 'scale' are handled in the calling pdf(...). The
# local variables 'loc' and 'scale' within pearson3._pdf are set to
# the defaults just to keep them as part of the equations for
# documentation.
loc = 0.0
scale = 1.0
# If skew is small, return _norm_pdf. The divide between pearson3
# and norm was found by brute force and is approximately a skew of
# 0.000016. No one, I hope, would actually use a skew value even
# close to this small.
norm2pearson_transition = 0.000016
ans, x, skew = np.broadcast_arrays([1.0], x, skew)
ans = ans.copy()
# mask is True where skew is small enough to use the normal approx.
mask = np.absolute(skew) < norm2pearson_transition
invmask = ~mask
beta = 2.0 / (skew[invmask] * scale)
alpha = (scale * beta)**2
zeta = loc - alpha / beta
transx = beta * (x[invmask] - zeta)
return ans, x, transx, mask, invmask, beta, alpha, zeta
def _argcheck(self, skew):
# The _argcheck function in rv_continuous only allows positive
# arguments. The skew argument for pearson3 can be zero (which I want
# to handle inside pearson3._pdf) or negative. So just return True
# for all skew args.
return np.ones(np.shape(skew), dtype=bool)
def _stats(self, skew):
_, _, _, _, _, beta, alpha, zeta = (
self._preprocess([1], skew))
m = zeta + alpha / beta
v = alpha / (beta**2)
s = 2.0 / (alpha**0.5) * np.sign(beta)
k = 6.0 / alpha
return m, v, s, k
def _pdf(self, x, skew):
# Do the calculation in _logpdf since helps to limit
# overflow/underflow problems
ans = np.exp(self._logpdf(x, skew))
if ans.ndim == 0:
if np.isnan(ans):
return 0.0
return ans
ans[np.isnan(ans)] = 0.0
return ans
def _logpdf(self, x, skew):
# PEARSON3 logpdf GAMMA logpdf
# np.log(abs(beta))
# + (alpha - 1)*np.log(beta*(x - zeta)) + (a - 1)*np.log(x)
# - beta*(x - zeta) - x
# - sc.gammaln(alpha) - sc.gammaln(a)
ans, x, transx, mask, invmask, beta, alpha, _ = (
self._preprocess(x, skew))
ans[mask] = np.log(_norm_pdf(x[mask]))
ans[invmask] = np.log(abs(beta)) + gamma._logpdf(transx, alpha)
return ans
def _cdf(self, x, skew):
ans, x, transx, mask, invmask, _, alpha, _ = (
self._preprocess(x, skew))
ans[mask] = _norm_cdf(x[mask])
ans[invmask] = gamma._cdf(transx, alpha)
return ans
def _rvs(self, skew):
skew = broadcast_to(skew, self._size)
ans, _, _, mask, invmask, beta, alpha, zeta = (
self._preprocess([0], skew))
nsmall = mask.sum()
nbig = mask.size - nsmall
ans[mask] = self._random_state.standard_normal(nsmall)
ans[invmask] = (self._random_state.standard_gamma(alpha, nbig)/beta +
zeta)
if self._size == ():
ans = ans[0]
return ans
def _ppf(self, q, skew):
ans, q, _, mask, invmask, beta, alpha, zeta = (
self._preprocess(q, skew))
ans[mask] = _norm_ppf(q[mask])
ans[invmask] = sc.gammaincinv(alpha, q[invmask])/beta + zeta
return ans
pearson3 = pearson3_gen(name="pearson3")
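# Illustrative sketch (editor's addition): the small-skew branch selected by
# `_preprocess` above makes `pearson3` collapse to the standard normal.
def _pearson3_normal_example():
    import numpy as np
    from scipy import stats
    x = np.linspace(-3, 3, 7)
    return np.allclose(stats.pearson3.pdf(x, 0.0), stats.norm.pdf(x))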
class powerlaw_gen(rv_continuous):
"""A power-function continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powerlaw` is::
powerlaw.pdf(x, a) = a * x**(a-1)
for ``0 <= x <= 1``, ``a > 0``.
`powerlaw` takes ``a`` as a shape parameter.
%(after_notes)s
`powerlaw` is a special case of `beta` with ``b == 1``.
%(example)s
"""
def _pdf(self, x, a):
return a*x**(a-1.0)
def _logpdf(self, x, a):
return np.log(a) + sc.xlogy(a - 1, x)
def _cdf(self, x, a):
return x**(a*1.0)
def _logcdf(self, x, a):
return a*np.log(x)
def _ppf(self, q, a):
return pow(q, 1.0/a)
def _stats(self, a):
return (a / (a + 1.0),
a / (a + 2.0) / (a + 1.0) ** 2,
-2.0 * ((a - 1.0) / (a + 3.0)) * np.sqrt((a + 2.0) / a),
6 * np.polyval([1, -1, -6, 2], a) / (a * (a + 3.0) * (a + 4)))
def _entropy(self, a):
return 1 - 1.0/a - np.log(a)
powerlaw = powerlaw_gen(a=0.0, b=1.0, name="powerlaw")
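# Illustrative sketch (editor's addition): the special-case note above,
# `powerlaw(a)` equal to `beta(a, b=1)`, checked on the unit interval.
def _powerlaw_vs_beta_example():
    import numpy as np
    from scipy import stats
    a, x = 1.7, np.linspace(0.05, 0.95, 7)
    return np.allclose(stats.powerlaw.pdf(x, a), stats.beta.pdf(x, a, 1.0))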
class powerlognorm_gen(rv_continuous):
"""A power log-normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powerlognorm` is::
powerlognorm.pdf(x, c, s) = c / (x*s) * phi(log(x)/s) *
(Phi(-log(x)/s))**(c-1),
where ``phi`` is the normal pdf, and ``Phi`` is the normal cdf,
and ``x > 0``, ``s, c > 0``.
`powerlognorm` takes ``c`` and ``s`` as shape parameters.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, c, s):
return (c/(x*s) * _norm_pdf(np.log(x)/s) *
pow(_norm_cdf(-np.log(x)/s), c*1.0-1.0))
def _cdf(self, x, c, s):
return 1.0 - pow(_norm_cdf(-np.log(x)/s), c*1.0)
def _ppf(self, q, c, s):
return np.exp(-s * _norm_ppf(pow(1.0 - q, 1.0 / c)))
powerlognorm = powerlognorm_gen(a=0.0, name="powerlognorm")
class powernorm_gen(rv_continuous):
"""A power normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powernorm` is::
powernorm.pdf(x, c) = c * phi(x) * (Phi(-x))**(c-1)
where ``phi`` is the normal pdf, and ``Phi`` is the normal cdf,
and ``x > 0``, ``c > 0``.
`powernorm` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
return c*_norm_pdf(x) * (_norm_cdf(-x)**(c-1.0))
def _logpdf(self, x, c):
return np.log(c) + _norm_logpdf(x) + (c-1)*_norm_logcdf(-x)
def _cdf(self, x, c):
return 1.0-_norm_cdf(-x)**(c*1.0)
def _ppf(self, q, c):
return -_norm_ppf(pow(1.0 - q, 1.0 / c))
powernorm = powernorm_gen(name='powernorm')
class rdist_gen(rv_continuous):
"""An R-distributed continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rdist` is::
rdist.pdf(x, c) = (1-x**2)**(c/2-1) / B(1/2, c/2)
for ``-1 <= x <= 1``, ``c > 0``.
`rdist` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
return np.power((1.0 - x**2), c / 2.0 - 1) / sc.beta(0.5, c / 2.0)
def _cdf(self, x, c):
term1 = x / sc.beta(0.5, c / 2.0)
res = 0.5 + term1 * sc.hyp2f1(0.5, 1 - c / 2.0, 1.5, x**2)
# There's an issue with hyp2f1, it returns nans near x = +-1, c > 100.
# Use the generic implementation in that case. See gh-1285 for
# background.
if np.any(np.isnan(res)):
return rv_continuous._cdf(self, x, c)
return res
def _munp(self, n, c):
numerator = (1 - (n % 2)) * sc.beta((n + 1.0) / 2, c / 2.0)
return numerator / sc.beta(1. / 2, c / 2.)
rdist = rdist_gen(a=-1.0, b=1.0, name="rdist")
class rayleigh_gen(rv_continuous):
"""A Rayleigh continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rayleigh` is::
rayleigh.pdf(r) = r * exp(-r**2/2)
for ``r >= 0``.
`rayleigh` is a special case of `chi` with ``df == 2``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self):
return chi.rvs(2, size=self._size, random_state=self._random_state)
def _pdf(self, r):
return np.exp(self._logpdf(r))
def _logpdf(self, r):
return np.log(r) - 0.5 * r * r
def _cdf(self, r):
return -sc.expm1(-0.5 * r**2)
def _ppf(self, q):
return np.sqrt(-2 * sc.log1p(-q))
def _sf(self, r):
return np.exp(self._logsf(r))
def _logsf(self, r):
return -0.5 * r * r
def _isf(self, q):
return np.sqrt(-2 * np.log(q))
def _stats(self):
val = 4 - np.pi
return (np.sqrt(np.pi/2),
val/2,
2*(np.pi-3)*np.sqrt(np.pi)/val**1.5,
6*np.pi/val-16/val**2)
def _entropy(self):
return _EULER/2.0 + 1 - 0.5*np.log(2)
rayleigh = rayleigh_gen(a=0.0, name="rayleigh")
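# Illustrative sketch (editor's addition): `rayleigh` as the df == 2 special
# case of `chi`, which is exactly how `_rvs` above draws its samples.
def _rayleigh_vs_chi_example():
    import numpy as np
    from scipy import stats
    x = np.linspace(0.0, 4.0, 9)
    return np.allclose(stats.rayleigh.cdf(x), stats.chi.cdf(x, 2))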
class reciprocal_gen(rv_continuous):
"""A reciprocal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `reciprocal` is::
reciprocal.pdf(x, a, b) = 1 / (x*log(b/a))
for ``a <= x <= b``, ``a, b > 0``.
`reciprocal` takes ``a`` and ``b`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, b):
self.a = a
self.b = b
self.d = np.log(b*1.0 / a)
return (a > 0) & (b > 0) & (b > a)
def _pdf(self, x, a, b):
return 1.0 / (x * self.d)
def _logpdf(self, x, a, b):
return -np.log(x) - np.log(self.d)
def _cdf(self, x, a, b):
return (np.log(x)-np.log(a)) / self.d
def _ppf(self, q, a, b):
return a*pow(b*1.0/a, q)
def _munp(self, n, a, b):
return 1.0/self.d / n * (pow(b*1.0, n) - pow(a*1.0, n))
def _entropy(self, a, b):
return 0.5*np.log(a*b)+np.log(np.log(b/a))
reciprocal = reciprocal_gen(name="reciprocal")
class rice_gen(rv_continuous):
"""A Rice continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rice` is::
rice.pdf(x, b) = x * exp(-(x**2+b**2)/2) * I[0](x*b)
for ``x > 0``, ``b > 0``.
`rice` takes ``b`` as a shape parameter.
%(after_notes)s
The Rice distribution describes the length, ``r``, of a 2-D vector
with components ``(U+u, V+v)``, where ``U, V`` are constant, ``u, v``
are independent Gaussian random variables with standard deviation
``s``. Let ``R = (U**2 + V**2)**0.5``. Then the pdf of ``r`` is
``rice.pdf(x, R/s, scale=s)``.
%(example)s
"""
def _argcheck(self, b):
return b >= 0
def _rvs(self, b):
# http://en.wikipedia.org/wiki/Rice_distribution
t = b/np.sqrt(2) + self._random_state.standard_normal(size=(2,) +
self._size)
return np.sqrt((t*t).sum(axis=0))
def _cdf(self, x, b):
return sc.chndtr(np.square(x), 2, np.square(b))
def _ppf(self, q, b):
return np.sqrt(sc.chndtrix(q, 2, np.square(b)))
def _pdf(self, x, b):
# We use (x**2 + b**2)/2 = ((x-b)**2)/2 + xb.
# The factor of np.exp(-xb) is then included in the i0e function
# in place of the modified Bessel function, i0, improving
# numerical stability for large values of xb.
return x * np.exp(-(x-b)*(x-b)/2.0) * sc.i0e(x*b)
def _munp(self, n, b):
nd2 = n/2.0
n1 = 1 + nd2
b2 = b*b/2.0
return (2.0**(nd2) * np.exp(-b2) * sc.gamma(n1) *
sc.hyp1f1(n1, 1, b2))
rice = rice_gen(a=0.0, name="rice")
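# Illustrative sketch (editor's addition): `_cdf` above routes through the
# noncentral chi-square, so rice.cdf(x, b) equals ncx2.cdf(x**2, 2, b**2).
def _rice_vs_ncx2_example():
    import numpy as np
    from scipy import stats
    b, x = 2.0, np.linspace(0.1, 5.0, 9)
    return np.allclose(stats.rice.cdf(x, b), stats.ncx2.cdf(x**2, 2, b**2))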
# FIXME: PPF does not work.
class recipinvgauss_gen(rv_continuous):
"""A reciprocal inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `recipinvgauss` is::
recipinvgauss.pdf(x, mu) = 1/sqrt(2*pi*x) * exp(-(1-mu*x)**2/(2*x*mu**2))
for ``x >= 0``.
`recipinvgauss` takes ``mu`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, mu):
return 1.0/np.sqrt(2*np.pi*x)*np.exp(-(1-mu*x)**2.0 / (2*x*mu**2.0))
def _logpdf(self, x, mu):
return -(1-mu*x)**2.0 / (2*x*mu**2.0) - 0.5*np.log(2*np.pi*x)
def _cdf(self, x, mu):
trm1 = 1.0/mu - x
trm2 = 1.0/mu + x
isqx = 1.0/np.sqrt(x)
return 1.0-_norm_cdf(isqx*trm1)-np.exp(2.0/mu)*_norm_cdf(-isqx*trm2)
def _rvs(self, mu):
return 1.0/self._random_state.wald(mu, 1.0, size=self._size)
recipinvgauss = recipinvgauss_gen(a=0.0, name='recipinvgauss')
class semicircular_gen(rv_continuous):
"""A semicircular continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `semicircular` is::
semicircular.pdf(x) = 2/pi * sqrt(1-x**2)
for ``-1 <= x <= 1``.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
return 2.0/np.pi*np.sqrt(1-x*x)
def _cdf(self, x):
return 0.5+1.0/np.pi*(x*np.sqrt(1-x*x) + np.arcsin(x))
def _stats(self):
return 0, 0.25, 0, -1.0
def _entropy(self):
return 0.64472988584940017414
semicircular = semicircular_gen(a=-1.0, b=1.0, name="semicircular")
class skew_norm_gen(rv_continuous):
"""A skew-normal random variable.
%(before_notes)s
Notes
-----
The pdf is::
skewnorm.pdf(x, a) = 2 * norm.pdf(x) * norm.cdf(a*x)
`skewnorm` takes ``a`` as a skewness parameter.
When ``a = 0`` the distribution is identical to a normal distribution.
rvs implements the method of [1]_.
%(after_notes)s
%(example)s
References
----------
.. [1] A. Azzalini and A. Capitanio (1999). Statistical applications of the
multivariate skew-normal distribution. J. Roy. Statist. Soc., B 61, 579-602.
http://azzalini.stat.unipd.it/SN/faq-r.html
"""
def _argcheck(self, a):
return np.isfinite(a)
def _pdf(self, x, a):
return 2.*_norm_pdf(x)*_norm_cdf(a*x)
def _rvs(self, a):
u0 = self._random_state.normal(size=self._size)
v = self._random_state.normal(size=self._size)
d = a/np.sqrt(1 + a**2)
u1 = d*u0 + v*np.sqrt(1 - d**2)
return np.where(u0 >= 0, u1, -u1)
def _stats(self, a, moments='mvsk'):
output = [None, None, None, None]
const = np.sqrt(2/np.pi) * a/np.sqrt(1 + a**2)
if 'm' in moments:
output[0] = const
if 'v' in moments:
output[1] = 1 - const**2
if 's' in moments:
output[2] = ((4 - np.pi)/2) * (const/np.sqrt(1 - const**2))**3
if 'k' in moments:
output[3] = (2*(np.pi - 3)) * (const**4/(1 - const**2)**2)
return output
skewnorm = skew_norm_gen(name='skewnorm')
class trapz_gen(rv_continuous):
"""A trapezoidal continuous random variable.
%(before_notes)s
Notes
-----
The trapezoidal distribution can be represented with an up-sloping line
from ``loc`` to ``(loc + c*scale)``, then constant to ``(loc + d*scale)``
and then downsloping from ``(loc + d*scale)`` to ``(loc+scale)``.
`trapz` takes ``c`` and ``d`` as shape parameters.
%(after_notes)s
The standard form is in the range [0, 1], with the density constant
between ``c`` and ``d``.
The location parameter shifts the start to `loc`.
The scale parameter changes the width from 1 to `scale`.
%(example)s
"""
def _argcheck(self, c, d):
return (c >= 0) & (c <= 1) & (d >= 0) & (d <= 1) & (d >= c)
def _pdf(self, x, c, d):
u = 2 / (d - c + 1)
condlist = [x < c, x <= d, x > d]
choicelist = [u * x / c, u, u * (1 - x) / (1 - d)]
return np.select(condlist, choicelist)
def _cdf(self, x, c, d):
condlist = [x < c, x <= d, x > d]
choicelist = [x**2 / c / (d - c + 1),
(c + 2 * (x - c)) / (d - c + 1),
1 - ((1 - x)**2 / (d - c + 1) / (1 - d))]
return np.select(condlist, choicelist)
def _ppf(self, q, c, d):
qc, qd = self._cdf(c, c, d), self._cdf(d, c, d)
condlist = [q < qc, q <= qd, q > qd]
choicelist = [np.sqrt(q * c * (1 + d - c)),
0.5 * q * (1 + d - c) + 0.5 * c,
1 - np.sqrt((1 - q) * (d - c + 1) * (1 - d))]
return np.select(condlist, choicelist)
trapz = trapz_gen(a=0.0, b=1.0, name="trapz")
class triang_gen(rv_continuous):
"""A triangular continuous random variable.
%(before_notes)s
Notes
-----
The triangular distribution can be represented with an up-sloping line from
``loc`` to ``(loc + c*scale)`` and then downsloping for ``(loc + c*scale)``
to ``(loc+scale)``.
`triang` takes ``c`` as a shape parameter.
%(after_notes)s
The standard form is in the range [0, 1] with c the mode.
The location parameter shifts the start to `loc`.
The scale parameter changes the width from 1 to `scale`.
%(example)s
"""
def _rvs(self, c):
return self._random_state.triangular(0, c, 1, self._size)
def _argcheck(self, c):
return (c >= 0) & (c <= 1)
def _pdf(self, x, c):
return np.where(x < c, 2*x/c, 2*(1-x)/(1-c))
def _cdf(self, x, c):
return np.where(x < c, x*x/c, (x*x-2*x+c)/(c-1))
def _ppf(self, q, c):
return np.where(q < c, np.sqrt(c*q), 1-np.sqrt((1-c)*(1-q)))
def _stats(self, c):
return ((c+1.0)/3.0,
(1.0-c+c*c)/18,
np.sqrt(2)*(2*c-1)*(c+1)*(c-2) / (5*np.power((1.0-c+c*c), 1.5)),
-3.0/5.0)
def _entropy(self, c):
return 0.5-np.log(2)
triang = triang_gen(a=0.0, b=1.0, name="triang")
class truncexpon_gen(rv_continuous):
"""A truncated exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `truncexpon` is::
truncexpon.pdf(x, b) = exp(-x) / (1-exp(-b))
for ``0 < x < b``.
`truncexpon` takes ``b`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, b):
self.b = b
return b > 0
def _pdf(self, x, b):
return np.exp(-x)/(-sc.expm1(-b))
def _logpdf(self, x, b):
return -x - np.log(-sc.expm1(-b))
def _cdf(self, x, b):
return sc.expm1(-x)/sc.expm1(-b)
def _ppf(self, q, b):
return -sc.log1p(q*sc.expm1(-b))
def _munp(self, n, b):
# wrong answer with formula, same as in continuous.pdf
# return sc.gamma(n+1)-sc.gammainc(1+n, b)
if n == 1:
return (1-(b+1)*np.exp(-b))/(-sc.expm1(-b))
elif n == 2:
return 2*(1-0.5*(b*b+2*b+2)*np.exp(-b))/(-sc.expm1(-b))
else:
# return generic for higher moments
# return rv_continuous._mom1_sc(self, n, b)
return self._mom1_sc(n, b)
def _entropy(self, b):
eB = np.exp(b)
return np.log(eB-1)+(1+eB*(b-1.0))/(1.0-eB)
truncexpon = truncexpon_gen(a=0.0, name='truncexpon')
class truncnorm_gen(rv_continuous):
"""A truncated normal continuous random variable.
%(before_notes)s
Notes
-----
The standard form of this distribution is a standard normal truncated to
the range [a, b] --- notice that a and b are defined over the domain of the
standard normal. To convert clip values for a specific mean and standard
deviation, use::
a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std
`truncnorm` takes ``a`` and ``b`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, b):
self.a = a
self.b = b
self._nb = _norm_cdf(b)
self._na = _norm_cdf(a)
self._sb = _norm_sf(b)
self._sa = _norm_sf(a)
self._delta = np.where(self.a > 0,
-(self._sb - self._sa),
self._nb - self._na)
self._logdelta = np.log(self._delta)
return a != b
def _pdf(self, x, a, b):
return _norm_pdf(x) / self._delta
def _logpdf(self, x, a, b):
return _norm_logpdf(x) - self._logdelta
def _cdf(self, x, a, b):
return (_norm_cdf(x) - self._na) / self._delta
def _ppf(self, q, a, b):
# XXX Use _lazywhere...
ppf = np.where(self.a > 0,
_norm_isf(q*self._sb + self._sa*(1.0-q)),
_norm_ppf(q*self._nb + self._na*(1.0-q)))
return ppf
def _stats(self, a, b):
nA, nB = self._na, self._nb
d = nB - nA
pA, pB = _norm_pdf(a), _norm_pdf(b)
mu = (pA - pB) / d # correction sign
mu2 = 1 + (a*pA - b*pB) / d - mu*mu
return mu, mu2, None, None
truncnorm = truncnorm_gen(name='truncnorm')
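# Illustrative sketch (editor's addition): the clip-value recipe from the
# docstring above; `my_mean`, `my_std` and the clip points are arbitrary.
def _truncnorm_clip_example():
    from scipy import stats
    my_mean, my_std = 5.0, 2.0
    myclip_a, myclip_b = 3.0, 9.0
    a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std
    # All mass now lies in [3, 9] for this mean/std pair.
    return stats.truncnorm.rvs(a, b, loc=my_mean, scale=my_std, size=10)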
# FIXME: RVS does not work.
class tukeylambda_gen(rv_continuous):
"""A Tukey-Lamdba continuous random variable.
%(before_notes)s
Notes
-----
A flexible distribution, able to represent and interpolate between the
following distributions:
- Cauchy (lam=-1)
- logistic (lam=0.0)
- approx Normal (lam=0.14)
- u-shape (lam = 0.5)
- uniform from -1 to 1 (lam = 1)
`tukeylambda` takes ``lam`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, lam):
return np.ones(np.shape(lam), dtype=bool)
def _pdf(self, x, lam):
Fx = np.asarray(sc.tklmbda(x, lam))
Px = Fx**(lam-1.0) + (np.asarray(1-Fx))**(lam-1.0)
Px = 1.0/np.asarray(Px)
return np.where((lam <= 0) | (abs(x) < 1.0/np.asarray(lam)), Px, 0.0)
def _cdf(self, x, lam):
return sc.tklmbda(x, lam)
def _ppf(self, q, lam):
return sc.boxcox(q, lam) - sc.boxcox1p(-q, lam)
def _stats(self, lam):
return 0, _tlvar(lam), 0, _tlkurt(lam)
def _entropy(self, lam):
def integ(p):
return np.log(pow(p, lam-1)+pow(1-p, lam-1))
return integrate.quad(integ, 0, 1)[0]
tukeylambda = tukeylambda_gen(name='tukeylambda')
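# Illustrative sketch (editor's addition): the lam == 0 case listed in the
# docstring above reduces `tukeylambda` to the standard logistic.
def _tukeylambda_logistic_example():
    import numpy as np
    from scipy import stats
    x = np.linspace(-4, 4, 9)
    return np.allclose(stats.tukeylambda.cdf(x, 0.0), stats.logistic.cdf(x))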
class uniform_gen(rv_continuous):
"""A uniform continuous random variable.
This distribution is constant between `loc` and ``loc + scale``.
%(before_notes)s
%(example)s
"""
def _rvs(self):
return self._random_state.uniform(0.0, 1.0, self._size)
def _pdf(self, x):
return 1.0*(x == x)
def _cdf(self, x):
return x
def _ppf(self, q):
return q
def _stats(self):
return 0.5, 1.0/12, 0, -1.2
def _entropy(self):
return 0.0
uniform = uniform_gen(a=0.0, b=1.0, name='uniform')
class vonmises_gen(rv_continuous):
"""A Von Mises continuous random variable.
%(before_notes)s
Notes
-----
If `x` or `loc` falls outside [-pi, pi], the values are treated as angles
and wrapped to their [-pi, pi] equivalents.
The probability density function for `vonmises` is::
vonmises.pdf(x, kappa) = exp(kappa * cos(x)) / (2*pi*I[0](kappa))
for ``-pi <= x <= pi``, ``kappa > 0``.
`vonmises` takes ``kappa`` as a shape parameter.
%(after_notes)s
See Also
--------
vonmises_line : The same distribution, defined on a [-pi, pi] segment
of the real line.
%(example)s
"""
def _rvs(self, kappa):
return self._random_state.vonmises(0.0, kappa, size=self._size)
def _pdf(self, x, kappa):
return np.exp(kappa * np.cos(x)) / (2*np.pi*sc.i0(kappa))
def _cdf(self, x, kappa):
return _stats.von_mises_cdf(kappa, x)
def _stats_skip(self, kappa):
return 0, None, 0, None
def _entropy(self, kappa):
return (-kappa * sc.i1(kappa) / sc.i0(kappa) +
np.log(2 * np.pi * sc.i0(kappa)))
vonmises = vonmises_gen(name='vonmises')
vonmises_line = vonmises_gen(a=-np.pi, b=np.pi, name='vonmises_line')
class wald_gen(invgauss_gen):
"""A Wald continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `wald` is::
wald.pdf(x) = 1/sqrt(2*pi*x**3) * exp(-(x-1)**2/(2*x))
for ``x > 0``.
`wald` is a special case of `invgauss` with ``mu == 1``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self):
return self._random_state.wald(1.0, 1.0, size=self._size)
def _pdf(self, x):
return invgauss._pdf(x, 1.0)
def _logpdf(self, x):
return invgauss._logpdf(x, 1.0)
def _cdf(self, x):
return invgauss._cdf(x, 1.0)
def _stats(self):
return 1.0, 1.0, 3.0, 15.0
wald = wald_gen(a=0.0, name="wald")
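# Illustrative sketch (editor's addition): `wald` as `invgauss` with mu == 1,
# which is literally how the `_pdf`/`_cdf` delegations above are written.
def _wald_vs_invgauss_example():
    import numpy as np
    from scipy import stats
    x = np.linspace(0.1, 3.0, 9)
    return np.allclose(stats.wald.pdf(x), stats.invgauss.pdf(x, 1.0))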
class wrapcauchy_gen(rv_continuous):
"""A wrapped Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `wrapcauchy` is::
wrapcauchy.pdf(x, c) = (1-c**2) / (2*pi*(1+c**2-2*c*cos(x)))
for ``0 <= x <= 2*pi``, ``0 < c < 1``.
`wrapcauchy` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
return (c > 0) & (c < 1)
def _pdf(self, x, c):
return (1.0-c*c)/(2*np.pi*(1+c*c-2*c*np.cos(x)))
def _cdf(self, x, c):
output = np.zeros(x.shape, dtype=x.dtype)
val = (1.0+c)/(1.0-c)
c1 = x < np.pi
c2 = 1-c1
xp = np.extract(c1, x)
xn = np.extract(c2, x)
if np.any(xn):
valn = np.extract(c2, np.ones_like(x)*val)
xn = 2*np.pi - xn
yn = np.tan(xn/2.0)
on = 1.0-1.0/np.pi*np.arctan(valn*yn)
np.place(output, c2, on)
if np.any(xp):
valp = np.extract(c1, np.ones_like(x)*val)
yp = np.tan(xp/2.0)
op = 1.0/np.pi*np.arctan(valp*yp)
np.place(output, c1, op)
return output
def _ppf(self, q, c):
val = (1.0-c)/(1.0+c)
rcq = 2*np.arctan(val*np.tan(np.pi*q))
rcmq = 2*np.pi-2*np.arctan(val*np.tan(np.pi*(1-q)))
return np.where(q < 1.0/2, rcq, rcmq)
def _entropy(self, c):
return np.log(2*np.pi*(1-c*c))
wrapcauchy = wrapcauchy_gen(a=0.0, b=2*np.pi, name='wrapcauchy')
class gennorm_gen(rv_continuous):
"""A generalized normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gennorm` is [1]_::
beta
gennorm.pdf(x, beta) = --------------- exp(-|x|**beta)
2 gamma(1/beta)
`gennorm` takes ``beta`` as a shape parameter.
For ``beta = 1``, it is identical to a Laplace distribution.
For ``beta = 2``, it is identical to a normal distribution
(with ``scale=1/sqrt(2)``).
See Also
--------
laplace : Laplace distribution
norm : normal distribution
References
----------
.. [1] "Generalized normal distribution, Version 1",
https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1
%(example)s
"""
def _pdf(self, x, beta):
return np.exp(self._logpdf(x, beta))
def _logpdf(self, x, beta):
return np.log(0.5*beta) - sc.gammaln(1.0/beta) - abs(x)**beta
def _cdf(self, x, beta):
c = 0.5 * np.sign(x)
# evaluating (.5 + c) first prevents numerical cancellation
return (0.5 + c) - c * sc.gammaincc(1.0/beta, abs(x)**beta)
def _ppf(self, x, beta):
c = np.sign(x - 0.5)
# evaluating (1. + c) first prevents numerical cancellation
return c * sc.gammainccinv(1.0/beta, (1.0 + c) - 2.0*c*x)**(1.0/beta)
def _sf(self, x, beta):
return self._cdf(-x, beta)
def _isf(self, x, beta):
return -self._ppf(x, beta)
def _stats(self, beta):
c1, c3, c5 = sc.gammaln([1.0/beta, 3.0/beta, 5.0/beta])
return 0., np.exp(c3 - c1), 0., np.exp(c5 + c1 - 2.0*c3) - 3.
def _entropy(self, beta):
return 1. / beta - np.log(.5 * beta) + sc.gammaln(1. / beta)
gennorm = gennorm_gen(name='gennorm')
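# Illustrative sketch (editor's addition): the beta == 2 case noted in the
# docstring above matches a normal with scale 1/sqrt(2).
def _gennorm_normal_example():
    import numpy as np
    from scipy import stats
    x = np.linspace(-3, 3, 9)
    return np.allclose(stats.gennorm.pdf(x, 2),
                       stats.norm.pdf(x, scale=1/np.sqrt(2)))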
class halfgennorm_gen(rv_continuous):
"""The upper half of a generalized normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfgennorm` is::
beta
halfgennorm.pdf(x, beta) = ------------- exp(-|x|**beta)
gamma(1/beta)
`gennorm` takes ``beta`` as a shape parameter.
For ``beta = 1``, it is identical to an exponential distribution.
For ``beta = 2``, it is identical to a half normal distribution
(with ``scale=1/sqrt(2)``).
See Also
--------
gennorm : generalized normal distribution
expon : exponential distribution
halfnorm : half normal distribution
References
----------
.. [1] "Generalized normal distribution, Version 1",
https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1
%(example)s
"""
def _pdf(self, x, beta):
return np.exp(self._logpdf(x, beta))
def _logpdf(self, x, beta):
return np.log(beta) - sc.gammaln(1.0/beta) - x**beta
def _cdf(self, x, beta):
return sc.gammainc(1.0/beta, x**beta)
def _ppf(self, x, beta):
return sc.gammaincinv(1.0/beta, x)**(1.0/beta)
def _sf(self, x, beta):
return sc.gammaincc(1.0/beta, x**beta)
def _isf(self, x, beta):
return sc.gammainccinv(1.0/beta, x)**(1.0/beta)
def _entropy(self, beta):
return 1.0/beta - np.log(beta) + sc.gammaln(1.0/beta)
halfgennorm = halfgennorm_gen(a=0, name='halfgennorm')
# Collect names of classes and objects in this module.
pairs = list(globals().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_continuous)
__all__ = _distn_names + _distn_gen_names
| bsd-3-clause |
julienbou/heroku-buildpack-serpan | vendor/distribute-0.6.36/setuptools/command/alias.py | 135 | 2477 | import distutils, os
from setuptools import Command
from distutils.util import convert_path
from distutils import log
from distutils.errors import *
from setuptools.command.setopt import edit_config, option_base, config_file
def shquote(arg):
"""Quote an argument for later parsing by shlex.split()"""
for c in '"', "'", "\\", "#":
if c in arg: return repr(arg)
if arg.split() != [arg]:
return repr(arg)
return arg
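# Illustrative sketch (editor's addition, not part of the original distribute
# code): shquote output should round-trip through shlex.split().
def _shquote_example():
    import shlex
    args = ['--name', 'hello world', 'plain']
    quoted = ' '.join(map(shquote, args))
    return shlex.split(quoted) == args  # expect True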
class alias(option_base):
"""Define a shortcut that invokes one or more commands"""
description = "define a shortcut to invoke one or more commands"
command_consumes_arguments = True
user_options = [
('remove', 'r', 'remove (unset) the alias'),
] + option_base.user_options
boolean_options = option_base.boolean_options + ['remove']
def initialize_options(self):
option_base.initialize_options(self)
self.args = None
self.remove = None
def finalize_options(self):
option_base.finalize_options(self)
if self.remove and len(self.args) != 1:
raise DistutilsOptionError(
"Must specify exactly one argument (the alias name) when "
"using --remove"
)
def run(self):
aliases = self.distribution.get_option_dict('aliases')
if not self.args:
print "Command Aliases"
print "---------------"
for alias in aliases:
print "setup.py alias", format_alias(alias, aliases)
return
elif len(self.args)==1:
alias, = self.args
if self.remove:
command = None
elif alias in aliases:
print "setup.py alias", format_alias(alias, aliases)
return
else:
print "No alias definition found for %r" % alias
return
else:
alias = self.args[0]
command = ' '.join(map(shquote,self.args[1:]))
edit_config(self.filename, {'aliases': {alias:command}}, self.dry_run)
def format_alias(name, aliases):
source, command = aliases[name]
if source == config_file('global'):
source = '--global-config '
elif source == config_file('user'):
source = '--user-config '
elif source == config_file('local'):
source = ''
else:
source = '--filename=%r' % source
return source+name+' '+command
| mit |
rakeshmi/tempest | tempest/tests/cmd/test_tempest_init.py | 14 | 2793 | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import fixtures
from tempest.cmd import init
from tempest.tests import base
class TestTempestInit(base.TestCase):
def test_generate_testr_conf(self):
# Create fake conf dir
conf_dir = self.useFixture(fixtures.TempDir())
init_cmd = init.TempestInit(None, None)
init_cmd.generate_testr_conf(conf_dir.path)
# Generate expected file contents
top_level_path = os.path.dirname(os.path.dirname(init.__file__))
discover_path = os.path.join(top_level_path, 'test_discover')
testr_conf_file = init.TESTR_CONF % (top_level_path, discover_path)
conf_path = conf_dir.join('.testr.conf')
conf_file = open(conf_path, 'r')
self.addCleanup(conf_file.close)
self.assertEqual(conf_file.read(), testr_conf_file)
def test_create_working_dir(self):
fake_local_dir = self.useFixture(fixtures.TempDir())
fake_local_conf_dir = self.useFixture(fixtures.TempDir())
# Create a fake conf file
fake_file = fake_local_conf_dir.join('conf_file.conf')
open(fake_file, 'w').close()
init_cmd = init.TempestInit(None, None)
init_cmd.create_working_dir(fake_local_dir.path,
fake_local_conf_dir.path)
# Assert directories are created
lock_path = os.path.join(fake_local_dir.path, 'tempest_lock')
etc_dir = os.path.join(fake_local_dir.path, 'etc')
log_dir = os.path.join(fake_local_dir.path, 'logs')
testr_dir = os.path.join(fake_local_dir.path, '.testrepository')
self.assertTrue(os.path.isdir(lock_path))
self.assertTrue(os.path.isdir(etc_dir))
self.assertTrue(os.path.isdir(log_dir))
self.assertTrue(os.path.isdir(testr_dir))
# Assert file creation
fake_file_moved = os.path.join(etc_dir, 'conf_file.conf')
local_conf_file = os.path.join(etc_dir, 'tempest.conf')
local_testr_conf = os.path.join(fake_local_dir.path, '.testr.conf')
self.assertTrue(os.path.isfile(fake_file_moved))
self.assertTrue(os.path.isfile(local_conf_file))
self.assertTrue(os.path.isfile(local_testr_conf))
| apache-2.0 |
sam-tsai/django | tests/utils_tests/test_dateformat.py | 265 | 6177 | from __future__ import unicode_literals
from datetime import date, datetime
from django.test import SimpleTestCase, override_settings
from django.test.utils import TZ_SUPPORT, requires_tz_support
from django.utils import dateformat, translation
from django.utils.dateformat import format
from django.utils.timezone import (
get_default_timezone, get_fixed_timezone, make_aware, utc,
)
@override_settings(TIME_ZONE='Europe/Copenhagen')
class DateFormatTests(SimpleTestCase):
def setUp(self):
self._orig_lang = translation.get_language()
translation.activate('en-us')
def tearDown(self):
translation.activate(self._orig_lang)
def test_date(self):
d = date(2009, 5, 16)
self.assertEqual(date.fromtimestamp(int(format(d, 'U'))), d)
def test_naive_datetime(self):
dt = datetime(2009, 5, 16, 5, 30, 30)
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U'))), dt)
@requires_tz_support
def test_datetime_with_local_tzinfo(self):
ltz = get_default_timezone()
dt = make_aware(datetime(2009, 5, 16, 5, 30, 30), ltz)
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), ltz), dt)
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U'))), dt.replace(tzinfo=None))
@requires_tz_support
def test_datetime_with_tzinfo(self):
tz = get_fixed_timezone(-510)
ltz = get_default_timezone()
dt = make_aware(datetime(2009, 5, 16, 5, 30, 30), ltz)
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), tz), dt)
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), ltz), dt)
# astimezone() is safe here because the target timezone doesn't have DST
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U'))), dt.astimezone(ltz).replace(tzinfo=None))
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), tz).utctimetuple(), dt.utctimetuple())
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), ltz).utctimetuple(), dt.utctimetuple())
def test_epoch(self):
udt = datetime(1970, 1, 1, tzinfo=utc)
self.assertEqual(format(udt, 'U'), '0')
def test_empty_format(self):
my_birthday = datetime(1979, 7, 8, 22, 00)
self.assertEqual(dateformat.format(my_birthday, ''), '')
def test_am_pm(self):
my_birthday = datetime(1979, 7, 8, 22, 00)
self.assertEqual(dateformat.format(my_birthday, 'a'), 'p.m.')
def test_microsecond(self):
# Regression test for #18951
dt = datetime(2009, 5, 16, microsecond=123)
self.assertEqual(dateformat.format(dt, 'u'), '000123')
def test_date_formats(self):
my_birthday = datetime(1979, 7, 8, 22, 00)
timestamp = datetime(2008, 5, 19, 11, 45, 23, 123456)
self.assertEqual(dateformat.format(my_birthday, 'A'), 'PM')
self.assertEqual(dateformat.format(timestamp, 'c'), '2008-05-19T11:45:23.123456')
self.assertEqual(dateformat.format(my_birthday, 'd'), '08')
self.assertEqual(dateformat.format(my_birthday, 'j'), '8')
self.assertEqual(dateformat.format(my_birthday, 'l'), 'Sunday')
self.assertEqual(dateformat.format(my_birthday, 'L'), 'False')
self.assertEqual(dateformat.format(my_birthday, 'm'), '07')
self.assertEqual(dateformat.format(my_birthday, 'M'), 'Jul')
self.assertEqual(dateformat.format(my_birthday, 'b'), 'jul')
self.assertEqual(dateformat.format(my_birthday, 'n'), '7')
self.assertEqual(dateformat.format(my_birthday, 'N'), 'July')
def test_time_formats(self):
my_birthday = datetime(1979, 7, 8, 22, 00)
self.assertEqual(dateformat.format(my_birthday, 'P'), '10 p.m.')
self.assertEqual(dateformat.format(my_birthday, 's'), '00')
self.assertEqual(dateformat.format(my_birthday, 'S'), 'th')
self.assertEqual(dateformat.format(my_birthday, 't'), '31')
self.assertEqual(dateformat.format(my_birthday, 'w'), '0')
self.assertEqual(dateformat.format(my_birthday, 'W'), '27')
self.assertEqual(dateformat.format(my_birthday, 'y'), '79')
self.assertEqual(dateformat.format(my_birthday, 'Y'), '1979')
self.assertEqual(dateformat.format(my_birthday, 'z'), '189')
def test_dateformat(self):
my_birthday = datetime(1979, 7, 8, 22, 00)
self.assertEqual(dateformat.format(my_birthday, r'Y z \C\E\T'), '1979 189 CET')
self.assertEqual(dateformat.format(my_birthday, r'jS \o\f F'), '8th of July')
def test_futuredates(self):
the_future = datetime(2100, 10, 25, 0, 00)
self.assertEqual(dateformat.format(the_future, r'Y'), '2100')
def test_timezones(self):
my_birthday = datetime(1979, 7, 8, 22, 00)
summertime = datetime(2005, 10, 30, 1, 00)
wintertime = datetime(2005, 10, 30, 4, 00)
timestamp = datetime(2008, 5, 19, 11, 45, 23, 123456)
# 3h30m to the west of UTC
tz = get_fixed_timezone(-210)
aware_dt = datetime(2009, 5, 16, 5, 30, 30, tzinfo=tz)
if TZ_SUPPORT:
self.assertEqual(dateformat.format(my_birthday, 'O'), '+0100')
self.assertEqual(dateformat.format(my_birthday, 'r'), 'Sun, 8 Jul 1979 22:00:00 +0100')
self.assertEqual(dateformat.format(my_birthday, 'T'), 'CET')
self.assertEqual(dateformat.format(my_birthday, 'e'), '')
self.assertEqual(dateformat.format(aware_dt, 'e'), '-0330')
self.assertEqual(dateformat.format(my_birthday, 'U'), '300315600')
self.assertEqual(dateformat.format(timestamp, 'u'), '123456')
self.assertEqual(dateformat.format(my_birthday, 'Z'), '3600')
self.assertEqual(dateformat.format(summertime, 'I'), '1')
self.assertEqual(dateformat.format(summertime, 'O'), '+0200')
self.assertEqual(dateformat.format(wintertime, 'I'), '0')
self.assertEqual(dateformat.format(wintertime, 'O'), '+0100')
# Ticket #16924 -- We don't need timezone support to test this
self.assertEqual(dateformat.format(aware_dt, 'O'), '-0330')
| bsd-3-clause |
meowler/sandbox | node_modules/node-gyp/gyp/pylib/gyp/xcode_ninja.py | 1789 | 10585 | # Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Xcode-ninja wrapper project file generator.
This updates the data structures passed to the Xcode gyp generator to build
with ninja instead. The Xcode project itself is transformed into a list of
executable targets, each with a build step to build with ninja, and a target
with every source and resource file. This appears to sidestep some of the
major performance headaches experienced using complex projects and large number
of targets within Xcode.
"""
import errno
import gyp.generator.ninja
import os
import re
import xml.sax.saxutils
def _WriteWorkspace(main_gyp, sources_gyp, params):
""" Create a workspace to wrap main and sources gyp paths. """
(build_file_root, build_file_ext) = os.path.splitext(main_gyp)
workspace_path = build_file_root + '.xcworkspace'
options = params['options']
if options.generator_output:
workspace_path = os.path.join(options.generator_output, workspace_path)
try:
os.makedirs(workspace_path)
except OSError, e:
if e.errno != errno.EEXIST:
raise
output_string = '<?xml version="1.0" encoding="UTF-8"?>\n' + \
'<Workspace version = "1.0">\n'
for gyp_name in [main_gyp, sources_gyp]:
name = os.path.splitext(os.path.basename(gyp_name))[0] + '.xcodeproj'
name = xml.sax.saxutils.quoteattr("group:" + name)
output_string += ' <FileRef location = %s></FileRef>\n' % name
output_string += '</Workspace>\n'
workspace_file = os.path.join(workspace_path, "contents.xcworkspacedata")
try:
with open(workspace_file, 'r') as input_file:
input_string = input_file.read()
if input_string == output_string:
return
except IOError:
# Ignore errors if the file doesn't exist.
pass
with open(workspace_file, 'w') as output_file:
output_file.write(output_string)
def _TargetFromSpec(old_spec, params):
""" Create fake target for xcode-ninja wrapper. """
# Determine ninja top level build dir (e.g. /path/to/out).
ninja_toplevel = None
jobs = 0
if params:
options = params['options']
ninja_toplevel = \
os.path.join(options.toplevel_dir,
gyp.generator.ninja.ComputeOutputDir(params))
jobs = params.get('generator_flags', {}).get('xcode_ninja_jobs', 0)
target_name = old_spec.get('target_name')
product_name = old_spec.get('product_name', target_name)
product_extension = old_spec.get('product_extension')
ninja_target = {}
ninja_target['target_name'] = target_name
ninja_target['product_name'] = product_name
if product_extension:
ninja_target['product_extension'] = product_extension
ninja_target['toolset'] = old_spec.get('toolset')
ninja_target['default_configuration'] = old_spec.get('default_configuration')
ninja_target['configurations'] = {}
# Tell Xcode to look in |ninja_toplevel| for build products.
new_xcode_settings = {}
if ninja_toplevel:
new_xcode_settings['CONFIGURATION_BUILD_DIR'] = \
"%s/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME)" % ninja_toplevel
if 'configurations' in old_spec:
for config in old_spec['configurations'].iterkeys():
old_xcode_settings = \
old_spec['configurations'][config].get('xcode_settings', {})
if 'IPHONEOS_DEPLOYMENT_TARGET' in old_xcode_settings:
new_xcode_settings['CODE_SIGNING_REQUIRED'] = "NO"
new_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET'] = \
old_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET']
ninja_target['configurations'][config] = {}
ninja_target['configurations'][config]['xcode_settings'] = \
new_xcode_settings
ninja_target['mac_bundle'] = old_spec.get('mac_bundle', 0)
ninja_target['ios_app_extension'] = old_spec.get('ios_app_extension', 0)
ninja_target['ios_watchkit_extension'] = \
old_spec.get('ios_watchkit_extension', 0)
ninja_target['ios_watchkit_app'] = old_spec.get('ios_watchkit_app', 0)
ninja_target['type'] = old_spec['type']
if ninja_toplevel:
ninja_target['actions'] = [
{
'action_name': 'Compile and copy %s via ninja' % target_name,
'inputs': [],
'outputs': [],
'action': [
'env',
'PATH=%s' % os.environ['PATH'],
'ninja',
'-C',
new_xcode_settings['CONFIGURATION_BUILD_DIR'],
target_name,
],
'message': 'Compile and copy %s via ninja' % target_name,
},
]
if jobs > 0:
ninja_target['actions'][0]['action'].extend(('-j', jobs))
return ninja_target
def IsValidTargetForWrapper(target_extras, executable_target_pattern, spec):
"""Limit targets for Xcode wrapper.
Xcode sometimes performs poorly with too many targets, so only include
proper executable targets, with filters to customize.
Arguments:
target_extras: Regular expression to always add, matching any target.
executable_target_pattern: Regular expression limiting executable targets.
spec: Specifications for target.
"""
target_name = spec.get('target_name')
# Always include targets matching target_extras.
if target_extras is not None and re.search(target_extras, target_name):
return True
# Otherwise just show executable targets.
if spec.get('type', '') == 'executable' and \
spec.get('product_extension', '') != 'bundle':
# If there is a filter and the target does not match, exclude the target.
if executable_target_pattern is not None:
if not re.search(executable_target_pattern, target_name):
return False
return True
return False
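# Illustrative sketch (editor's addition, not part of the original gyp code):
# how the filters above combine for a hypothetical executable target spec.
def _is_valid_target_example():
  spec = {'target_name': 'base_unittests', 'type': 'executable'}
  # With no extras pattern, only executables matching the pattern pass.
  return (IsValidTargetForWrapper(None, r'unittests', spec) and
          not IsValidTargetForWrapper(None, r'browser', spec))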
def CreateWrapper(target_list, target_dicts, data, params):
"""Initialize targets for the ninja wrapper.
This sets up the necessary variables in the targets to generate Xcode projects
that use ninja as an external builder.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
data: Dict of flattened build files keyed on gyp path.
params: Dict of global options for gyp.
"""
orig_gyp = params['build_files'][0]
for gyp_name, gyp_dict in data.iteritems():
if gyp_name == orig_gyp:
depth = gyp_dict['_DEPTH']
# Check for custom main gyp name, otherwise use the default CHROMIUM_GYP_FILE
# and prepend .ninja before the .gyp extension.
generator_flags = params.get('generator_flags', {})
main_gyp = generator_flags.get('xcode_ninja_main_gyp', None)
if main_gyp is None:
(build_file_root, build_file_ext) = os.path.splitext(orig_gyp)
main_gyp = build_file_root + ".ninja" + build_file_ext
# Create new |target_list|, |target_dicts| and |data| data structures.
new_target_list = []
new_target_dicts = {}
new_data = {}
# Set base keys needed for |data|.
new_data[main_gyp] = {}
new_data[main_gyp]['included_files'] = []
new_data[main_gyp]['targets'] = []
new_data[main_gyp]['xcode_settings'] = \
data[orig_gyp].get('xcode_settings', {})
# Normally the xcode-ninja generator includes only valid executable targets.
# If |xcode_ninja_executable_target_pattern| is set, that list is reduced to
# executable targets that match the pattern. (Default all)
executable_target_pattern = \
generator_flags.get('xcode_ninja_executable_target_pattern', None)
# For including other non-executable targets, add the matching target name
# to the |xcode_ninja_target_pattern| regular expression. (Default none)
target_extras = generator_flags.get('xcode_ninja_target_pattern', None)
for old_qualified_target in target_list:
spec = target_dicts[old_qualified_target]
if IsValidTargetForWrapper(target_extras, executable_target_pattern, spec):
# Add to new_target_list.
target_name = spec.get('target_name')
new_target_name = '%s:%s#target' % (main_gyp, target_name)
new_target_list.append(new_target_name)
# Add to new_target_dicts.
new_target_dicts[new_target_name] = _TargetFromSpec(spec, params)
# Add to new_data.
for old_target in data[old_qualified_target.split(':')[0]]['targets']:
if old_target['target_name'] == target_name:
new_data_target = {}
new_data_target['target_name'] = old_target['target_name']
new_data_target['toolset'] = old_target['toolset']
new_data[main_gyp]['targets'].append(new_data_target)
# Create sources target.
sources_target_name = 'sources_for_indexing'
sources_target = _TargetFromSpec(
{ 'target_name' : sources_target_name,
'toolset': 'target',
'default_configuration': 'Default',
'mac_bundle': '0',
'type': 'executable'
}, None)
# Tell Xcode to look everywhere for headers.
sources_target['configurations'] = {'Default': { 'include_dirs': [ depth ] } }
sources = []
for target, target_dict in target_dicts.iteritems():
base = os.path.dirname(target)
files = target_dict.get('sources', []) + \
target_dict.get('mac_bundle_resources', [])
for action in target_dict.get('actions', []):
files.extend(action.get('inputs', []))
# Remove files starting with $. These are mostly intermediate files for the
# build system.
files = [ file for file in files if not file.startswith('$')]
# Make sources relative to root build file.
relative_path = os.path.dirname(main_gyp)
sources += [ os.path.relpath(os.path.join(base, file), relative_path)
for file in files ]
sources_target['sources'] = sorted(set(sources))
# Put sources_to_index in it's own gyp.
sources_gyp = \
os.path.join(os.path.dirname(main_gyp), sources_target_name + ".gyp")
fully_qualified_target_name = \
'%s:%s#target' % (sources_gyp, sources_target_name)
# Add to new_target_list, new_target_dicts and new_data.
new_target_list.append(fully_qualified_target_name)
new_target_dicts[fully_qualified_target_name] = sources_target
new_data_target = {}
new_data_target['target_name'] = sources_target['target_name']
new_data_target['_DEPTH'] = depth
new_data_target['toolset'] = "target"
new_data[sources_gyp] = {}
new_data[sources_gyp]['targets'] = []
new_data[sources_gyp]['included_files'] = []
new_data[sources_gyp]['xcode_settings'] = \
data[orig_gyp].get('xcode_settings', {})
new_data[sources_gyp]['targets'].append(new_data_target)
# Write workspace to file.
_WriteWorkspace(main_gyp, sources_gyp, params)
return (new_target_list, new_target_dicts, new_data)
| mit |
logituit/Recbot | IMDbPY-5.1.1/bin/search_character.py | 7 | 1413 | #!/usr/bin/env python
"""
search_character.py
Usage: search_character "character name"
Search for the given name and print the results.
"""
import sys
# Import the IMDbPY package.
try:
import imdb
except ImportError:
print 'You bad boy! You need to install the IMDbPY package!'
sys.exit(1)
if len(sys.argv) != 2:
print 'Only one argument is required:'
print ' %s "character name"' % sys.argv[0]
sys.exit(2)
name = sys.argv[1]
i = imdb.IMDb()
in_encoding = sys.stdin.encoding or sys.getdefaultencoding()
out_encoding = sys.stdout.encoding or sys.getdefaultencoding()
name = unicode(name, in_encoding, 'replace')
try:
# Do the search, and get the results (a list of character objects).
results = i.search_character(name)
except imdb.IMDbError, e:
print "Probably you're not connected to Internet. Complete error report:"
print e
sys.exit(3)
# Print the results.
print ' %s result%s for "%s":' % (len(results),
('', 's')[len(results) != 1],
name.encode(out_encoding, 'replace'))
print 'characterID\t: imdbID : name'
# Print the long imdb name for every character.
for character in results:
outp = u'%s\t\t: %s : %s' % (character.characterID, i.get_imdbID(character),
character['long imdb name'])
print outp.encode(out_encoding, 'replace')
| mit |
fpiot/mbed-ats | workspace_tools/toolchains/gcc.py | 2 | 10915 | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from os.path import join, basename, splitext
from workspace_tools.toolchains import mbedToolchain
from workspace_tools.settings import GCC_ARM_PATH, GCC_CR_PATH, GCC_CS_PATH, CW_EWL_PATH, CW_GCC_PATH
from workspace_tools.settings import GOANNA_PATH
from workspace_tools.hooks import hook_tool
class GCC(mbedToolchain):
LINKER_EXT = '.ld'
LIBRARY_EXT = '.a'
STD_LIB_NAME = "lib%s.a"
CIRCULAR_DEPENDENCIES = True
DIAGNOSTIC_PATTERN = re.compile('((?P<line>\d+):)(\d+:)? (?P<severity>warning|error): (?P<message>.+)')
def __init__(self, target, options=None, notify=None, macros=None, silent=False, tool_path=""):
mbedToolchain.__init__(self, target, options, notify, macros, silent)
if target.core == "Cortex-M0+":
cpu = "cortex-m0plus"
elif target.core == "Cortex-M4F":
cpu = "cortex-m4"
else:
cpu = target.core.lower()
self.cpu = ["-mcpu=%s" % cpu]
if target.core.startswith("Cortex"):
self.cpu.append("-mthumb")
if target.core == "Cortex-M4F":
self.cpu.append("-mfpu=fpv4-sp-d16")
self.cpu.append("-mfloat-abi=softfp")
if target.core == "Cortex-A9":
self.cpu.append("-mthumb-interwork")
self.cpu.append("-marm")
self.cpu.append("-march=armv7-a")
self.cpu.append("-mfpu=vfpv3")
self.cpu.append("-mfloat-abi=hard")
self.cpu.append("-mno-unaligned-access")
# Note: We are using "-O2" instead of "-Os" to avoid this known GCC bug:
# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=46762
common_flags = ["-c", "-Wall", "-Wextra",
"-Wno-unused-parameter", "-Wno-missing-field-initializers",
"-fmessage-length=0", "-fno-exceptions", "-fno-builtin",
"-ffunction-sections", "-fdata-sections",
"-MMD", "-fno-delete-null-pointer-checks", "-fomit-frame-pointer"
] + self.cpu
if "save-asm" in self.options:
common_flags.append("-save-temps")
if "debug-info" in self.options:
common_flags.append("-g")
common_flags.append("-O0")
else:
common_flags.append("-O2")
main_cc = join(tool_path, "arm-none-eabi-gcc")
main_cppc = join(tool_path, "arm-none-eabi-g++")
self.asm = [main_cc, "-x", "assembler-with-cpp"] + common_flags
if not "analyze" in self.options:
self.cc = [main_cc, "-std=gnu99"] + common_flags
self.cppc =[main_cppc, "-std=gnu++98", "-fno-rtti"] + common_flags
else:
self.cc = [join(GOANNA_PATH, "goannacc"), "--with-cc=" + main_cc.replace('\\', '/'), "-std=gnu99", "--dialect=gnu", '--output-format="%s"' % self.GOANNA_FORMAT] + common_flags
self.cppc= [join(GOANNA_PATH, "goannac++"), "--with-cxx=" + main_cppc.replace('\\', '/'), "-std=gnu++98", "-fno-rtti", "--dialect=gnu", '--output-format="%s"' % self.GOANNA_FORMAT] + common_flags
self.ld = [join(tool_path, "arm-none-eabi-gcc"), "-Wl,--gc-sections", "-Wl,--wrap,main"] + self.cpu
self.sys_libs = ["stdc++", "supc++", "m", "c", "gcc"]
self.ar = join(tool_path, "arm-none-eabi-ar")
self.elf2bin = join(tool_path, "arm-none-eabi-objcopy")
def assemble(self, source, object, includes):
return [self.hook.get_cmdline_assembler(self.asm + ['-D%s' % s for s in self.get_symbols() + self.macros] + ["-I%s" % i for i in includes] + ["-o", object, source])]
def parse_dependencies(self, dep_path):
dependencies = []
for line in open(dep_path).readlines()[1:]:
file = line.replace('\\\n', '').strip()
if file:
# GCC might list more than one dependency on a single line, in this case
# the dependencies are separated by a space. However, a space might also
# indicate an actual space character in a dependency path, but in this case
# the space character is prefixed by a backslash.
# Temporary replace all '\ ' with a special char that is not used (\a in this
# case) to keep them from being interpreted by 'split' (they will be converted
# back later to a space char)
file = file.replace('\\ ', '\a')
if file.find(" ") == -1:
dependencies.append(file.replace('\a', ' '))
else:
dependencies = dependencies + [f.replace('\a', ' ') for f in file.split(" ")]
return dependencies
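# Illustration (sketch; the file names below are made up, not from any
# real .d file): a continuation line such as
#   "mbed\ sdk/device.h common.h \"
# has its trailing "\<newline>" stripped, '\ ' becomes '\a' so that
# split(" ") leaves escaped spaces intact, and '\a' is mapped back,
# yielding ['mbed sdk/device.h', 'common.h'].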
def parse_output(self, output):
# The warning/error notification is multiline
WHERE, WHAT = 0, 1
state, file, message = WHERE, None, None
for line in output.splitlines():
match = self.goanna_parse_line(line)
if match is not None:
self.cc_info(
match.group('severity').lower(),
match.group('file'),
match.group('line'),
match.group('message'),
target_name=self.target.name,
toolchain_name=self.name
)
continue
# Each line should start with the file information: "filepath: ..."
# i should point past the file path ^
# avoid the first column in Windows (C:\)
i = line.find(':', 2)
if i == -1: continue
if state == WHERE:
file = line[:i]
message = line[i+1:].strip() + ' '
state = WHAT
elif state == WHAT:
match = GCC.DIAGNOSTIC_PATTERN.match(line[i+1:])
if match is None:
state = WHERE
continue
self.cc_info(
match.group('severity'),
file, match.group('line'),
message + match.group('message')
)
def archive(self, objects, lib_path):
self.default_cmd([self.ar, "rcs", lib_path] + objects)
def link(self, output, objects, libraries, lib_dirs, mem_map):
libs = []
for l in libraries:
name, _ = splitext(basename(l))
libs.append("-l%s" % name[3:])
libs.extend(["-l%s" % l for l in self.sys_libs])
# NOTE: There is a circular dependency between the mbed library and the clib.
# We could define a set of weak symbols to satisfy the clib dependencies in "sys.o",
# but if an application uses only clib symbols and not mbed symbols, the final
# image is not correctly retargeted.
if self.CIRCULAR_DEPENDENCIES:
libs.extend(libs)
self.default_cmd(self.hook.get_cmdline_linker(self.ld + ["-T%s" % mem_map, "-o", output] +
objects + ["-L%s" % L for L in lib_dirs] + libs))
@hook_tool
def binary(self, resources, elf, bin):
self.default_cmd(self.hook.get_cmdline_binary([self.elf2bin, "-O", "binary", elf, bin]))
class GCC_ARM(GCC):
def __init__(self, target, options=None, notify=None, macros=None, silent=False):
GCC.__init__(self, target, options, notify, macros, silent, GCC_ARM_PATH)
# Use latest gcc nanolib
self.ld.append("--specs=nano.specs")
if target.name in ["LPC1768", "LPC4088", "LPC4088_DM", "LPC4330", "UBLOX_C027", "LPC2368"]:
self.ld.extend(["-u _printf_float", "-u _scanf_float"])
elif target.name in ["RZ_A1H", "ARCH_MAX", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F401RE", "NUCLEO_F411RE", "NUCLEO_F446RE", "ELMO_F411RE"]:
self.ld.extend(["-u_printf_float", "-u_scanf_float"])
self.sys_libs.append("nosys")
class GCC_CR(GCC):
def __init__(self, target, options=None, notify=None, macros=None, silent=False):
GCC.__init__(self, target, options, notify, macros, silent, GCC_CR_PATH)
additional_compiler_flags = [
"-D__NEWLIB__", "-D__CODE_RED", "-D__USE_CMSIS", "-DCPP_USE_HEAP",
]
self.cc += additional_compiler_flags
self.cppc += additional_compiler_flags
# Use latest gcc nanolib
self.ld.append("--specs=nano.specs")
if target.name in ["LPC1768", "LPC4088", "LPC4088_DM", "LPC4330", "UBLOX_C027", "LPC2368"]:
self.ld.extend(["-u _printf_float", "-u _scanf_float"])
self.ld += ["-nostdlib"]
class GCC_CS(GCC):
def __init__(self, target, options=None, notify=None, macros=None, silent=False):
GCC.__init__(self, target, options, notify, macros, silent, GCC_CS_PATH)
class GCC_CW(GCC):
ARCH_LIB = {
"Cortex-M0+": "armv6-m",
}
def __init__(self, target, options=None, notify=None, macros=None, silent=False):
GCC.__init__(self, target, options, notify, macros, silent, CW_GCC_PATH)
class GCC_CW_EWL(GCC_CW):
def __init__(self, target, options=None, notify=None, macros=None, silent=False):
GCC_CW.__init__(self, target, options, notify, macros, silent)
# Compiler
common = [
'-mfloat-abi=soft',
'-nostdinc', '-I%s' % join(CW_EWL_PATH, "EWL_C", "include"),
]
self.cc += common + [
'-include', join(CW_EWL_PATH, "EWL_C", "include", 'lib_c99.prefix')
]
self.cppc += common + [
'-nostdinc++', '-I%s' % join(CW_EWL_PATH, "EWL_C++", "include"),
'-include', join(CW_EWL_PATH, "EWL_C++", "include", 'lib_ewl_c++.prefix')
]
# Linker
self.sys_libs = []
self.CIRCULAR_DEPENDENCIES = False
self.ld = [join(CW_GCC_PATH, "arm-none-eabi-g++"),
"-Xlinker --gc-sections",
"-L%s" % join(CW_EWL_PATH, "lib", GCC_CW.ARCH_LIB[target.core]),
"-n", "-specs=ewl_c++.specs", "-mfloat-abi=soft",
"-Xlinker --undefined=__pformatter_", "-Xlinker --defsym=__pformatter=__pformatter_",
"-Xlinker --undefined=__sformatter", "-Xlinker --defsym=__sformatter=__sformatter",
] + self.cpu
class GCC_CW_NEWLIB(GCC_CW):
def __init__(self, target, options=None, notify=None, macros=None, silent=False):
GCC_CW.__init__(self, target, options, notify, macros, silent)
| apache-2.0 |
jwlawson/tensorflow | tensorflow/contrib/training/python/training/device_setter_test.py | 54 | 5692 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.training.device_setter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.training.python.training import device_setter as device_setter_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import device_setter
from tensorflow.python.training import server_lib
_CLUSTER_SPEC = server_lib.ClusterSpec({
"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
})
MockOperation = collections.namedtuple("MockOperation", "name")
class RandomStrategyTest(test.TestCase):
def testBasic(self):
ps_strategy = device_setter_lib.RandomStrategy(2, seed=0)
with ops.device(
device_setter.replica_device_setter(
cluster=_CLUSTER_SPEC,
ps_strategy=ps_strategy)):
u = variables.Variable(array_ops.zeros([2, 2]))
v = variables.Variable(array_ops.zeros([2, 1]))
w = variables.Variable(array_ops.zeros([2, 2]))
x = variables.Variable(array_ops.zeros([1, 3]))
a = v + w
# Randomly distributed with seed 0.
self.assertDeviceEqual("/job:ps/task:1", u.device)
self.assertDeviceEqual("/job:ps/task:1", u.initializer.device)
self.assertDeviceEqual("/job:ps/task:0", v.device)
self.assertDeviceEqual("/job:ps/task:0", v.initializer.device)
self.assertDeviceEqual("/job:ps/task:1", w.device)
self.assertDeviceEqual("/job:ps/task:1", w.initializer.device)
self.assertDeviceEqual("/job:ps/task:1", x.device)
self.assertDeviceEqual("/job:ps/task:1", x.initializer.device)
self.assertDeviceEqual("/job:worker", a.device)
def testHandlesUnicode(self):
op = MockOperation(u"A unicode \u018e string \xf1")
ps_strategy = device_setter_lib.RandomStrategy(2, seed=0)
ps_task = ps_strategy(op)
self.assertEqual(ps_task, 1)
class GreedyLoadBalancingStrategyTest(test.TestCase):
def testUniformLoadEqualsRoundRobin(self):
def _load_fn(unused_op):
return 1
with ops.device(
device_setter.replica_device_setter(
cluster=_CLUSTER_SPEC,
ps_strategy=device_setter_lib.GreedyLoadBalancingStrategy(
2, _load_fn))):
u = variables.Variable(array_ops.zeros([2, 2]))
v = variables.Variable(array_ops.zeros([2, 1]))
w = variables.Variable(array_ops.zeros([2, 2]))
x = variables.Variable(array_ops.zeros([1, 3]))
a = v + w
self.assertDeviceEqual("/job:ps/task:0", u.device)
self.assertDeviceEqual("/job:ps/task:0", u.initializer.device)
self.assertDeviceEqual("/job:ps/task:1", v.device)
self.assertDeviceEqual("/job:ps/task:1", v.initializer.device)
self.assertDeviceEqual("/job:ps/task:0", w.device)
self.assertDeviceEqual("/job:ps/task:0", w.initializer.device)
self.assertDeviceEqual("/job:ps/task:1", x.device)
self.assertDeviceEqual("/job:ps/task:1", x.initializer.device)
self.assertDeviceEqual("/job:worker", a.device)
def testByteSizeLoadFn(self):
with ops.device(
device_setter.replica_device_setter(
cluster=_CLUSTER_SPEC,
ps_strategy=device_setter_lib.GreedyLoadBalancingStrategy(
2, device_setter_lib.byte_size_load_fn))):
u = variables.Variable(array_ops.zeros([2, 2]))
v = variables.Variable(array_ops.zeros([2, 1]))
w = variables.Variable(array_ops.zeros([2, 2]))
x = variables.Variable(array_ops.zeros([1, 3]))
a = v + w
self.assertDeviceEqual("/job:ps/task:0", u.device)
self.assertDeviceEqual("/job:ps/task:0", u.initializer.device)
self.assertDeviceEqual("/job:ps/task:1", v.device)
self.assertDeviceEqual("/job:ps/task:1", v.initializer.device)
self.assertDeviceEqual("/job:ps/task:1", w.device)
self.assertDeviceEqual("/job:ps/task:1", w.initializer.device)
self.assertDeviceEqual("/job:ps/task:0", x.device)
self.assertDeviceEqual("/job:ps/task:0", x.initializer.device)
self.assertDeviceEqual("/job:worker", a.device)
def testByteSizeLoadFnWithScalar(self):
with ops.device(
device_setter.replica_device_setter(
cluster=_CLUSTER_SPEC,
ps_strategy=device_setter_lib.GreedyLoadBalancingStrategy(
2, device_setter_lib.byte_size_load_fn))):
# Note: we must test the load function as part of the device function
# instead of passing u.op to the function directly, because the only
# time that the output Tensor has unknown shape for scalars is during
# Variable construction.
u = variables.Variable(0)
self.assertDeviceEqual("/job:ps/task:0", u.device)
self.assertDeviceEqual("/job:ps/task:0", u.initializer.device)
if __name__ == "__main__":
test.main()
| apache-2.0 |
sadleader/odoo | addons/l10n_in_hr_payroll/wizard/hr_salary_employee_bymonth.py | 374 | 2830 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class hr_salary_employee_bymonth(osv.osv_memory):
_name = 'hr.salary.employee.month'
_description = 'Hr Salary Employee By Month Report'
_columns = {
'start_date': fields.date('Start Date', required=True),
'end_date': fields.date('End Date', required=True),
'employee_ids': fields.many2many('hr.employee', 'payroll_year_rel', 'payroll_year_id', 'employee_id', 'Employees', required=True),
'category_id': fields.many2one('hr.salary.rule.category', 'Category', required=True),
}
def _get_default_category(self, cr, uid, context=None):
category_ids = self.pool.get('hr.salary.rule.category').search(cr, uid, [('code', '=', 'NET')], context=context)
return category_ids and category_ids[0] or False
_defaults = {
'start_date': lambda *a: time.strftime('%Y-01-01'),
'end_date': lambda *a: time.strftime('%Y-%m-%d'),
'category_id': _get_default_category
}
def print_report(self, cr, uid, ids, context=None):
"""
To get the data and print the report
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
@return: return report
"""
if context is None:
context = {}
datas = {'ids': context.get('active_ids', [])}
res = self.read(cr, uid, ids, context=context)
res = res and res[0] or {}
datas.update({'form': res})
return self.pool['report'].get_action(cr, uid, ids,
'l10n_in_hr_payroll.report_hrsalarybymonth',
data=datas, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
proxysh/Safejumper-for-Desktop | buildmac/Resources/env/lib/python2.7/site-packages/twisted/test/test_ident.py | 10 | 7012 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for twisted.protocols.ident module.
"""
import struct
from twisted.protocols import ident
from twisted.python import failure
from twisted.internet import error
from twisted.internet import defer
from twisted.python.compat import NativeStringIO
from twisted.trial import unittest
from twisted.test.proto_helpers import StringTransport
try:
import builtins
except ImportError:
import __builtin__ as builtins
class ClassParserTests(unittest.TestCase):
"""
Test parsing of ident responses.
"""
def setUp(self):
"""
Create an ident client used in tests.
"""
self.client = ident.IdentClient()
def test_identError(self):
"""
'UNKNOWN-ERROR' error should map to the L{ident.IdentError} exception.
"""
d = defer.Deferred()
self.client.queries.append((d, 123, 456))
self.client.lineReceived('123, 456 : ERROR : UNKNOWN-ERROR')
return self.assertFailure(d, ident.IdentError)
def test_noUserError(self):
"""
'NO-USER' error should map to the L{ident.NoUser} exception.
"""
d = defer.Deferred()
self.client.queries.append((d, 234, 456))
self.client.lineReceived('234, 456 : ERROR : NO-USER')
return self.assertFailure(d, ident.NoUser)
def test_invalidPortError(self):
"""
'INVALID-PORT' error should map to the L{ident.InvalidPort} exception.
"""
d = defer.Deferred()
self.client.queries.append((d, 345, 567))
self.client.lineReceived('345, 567 : ERROR : INVALID-PORT')
return self.assertFailure(d, ident.InvalidPort)
def test_hiddenUserError(self):
"""
'HIDDEN-USER' error should map to the L{ident.HiddenUser} exception.
"""
d = defer.Deferred()
self.client.queries.append((d, 567, 789))
self.client.lineReceived('567, 789 : ERROR : HIDDEN-USER')
return self.assertFailure(d, ident.HiddenUser)
def test_lostConnection(self):
"""
A pending query which failed because of a ConnectionLost should
receive an L{ident.IdentError}.
"""
d = defer.Deferred()
self.client.queries.append((d, 765, 432))
self.client.connectionLost(failure.Failure(error.ConnectionLost()))
return self.assertFailure(d, ident.IdentError)
class TestIdentServer(ident.IdentServer):
def lookup(self, serverAddress, clientAddress):
return self.resultValue
class TestErrorIdentServer(ident.IdentServer):
def lookup(self, serverAddress, clientAddress):
raise self.exceptionType()
class NewException(RuntimeError):
pass
class ServerParserTests(unittest.TestCase):
def testErrors(self):
p = TestErrorIdentServer()
p.makeConnection(StringTransport())
L = []
p.sendLine = L.append
p.exceptionType = ident.IdentError
p.lineReceived('123, 345')
self.assertEqual(L[0], '123, 345 : ERROR : UNKNOWN-ERROR')
p.exceptionType = ident.NoUser
p.lineReceived('432, 210')
self.assertEqual(L[1], '432, 210 : ERROR : NO-USER')
p.exceptionType = ident.InvalidPort
p.lineReceived('987, 654')
self.assertEqual(L[2], '987, 654 : ERROR : INVALID-PORT')
p.exceptionType = ident.HiddenUser
p.lineReceived('756, 827')
self.assertEqual(L[3], '756, 827 : ERROR : HIDDEN-USER')
p.exceptionType = NewException
p.lineReceived('987, 789')
self.assertEqual(L[4], '987, 789 : ERROR : UNKNOWN-ERROR')
errs = self.flushLoggedErrors(NewException)
self.assertEqual(len(errs), 1)
for port in -1, 0, 65536, 65537:
del L[:]
p.lineReceived('%d, 5' % (port,))
p.lineReceived('5, %d' % (port,))
self.assertEqual(
L, ['%d, 5 : ERROR : INVALID-PORT' % (port,),
'5, %d : ERROR : INVALID-PORT' % (port,)])
def testSuccess(self):
p = TestIdentServer()
p.makeConnection(StringTransport())
L = []
p.sendLine = L.append
p.resultValue = ('SYS', 'USER')
p.lineReceived('123, 456')
self.assertEqual(L[0], '123, 456 : USERID : SYS : USER')
if struct.pack('=L', 1)[0] == '\x01':
_addr1 = '0100007F'
_addr2 = '04030201'
else:
_addr1 = '7F000001'
_addr2 = '01020304'
class ProcMixinTests(unittest.TestCase):
line = ('4: %s:0019 %s:02FA 0A 00000000:00000000 '
'00:00000000 00000000 0 0 10927 1 f72a5b80 '
'3000 0 0 2 -1') % (_addr1, _addr2)
sampleFile = (' sl local_address rem_address st tx_queue rx_queue tr '
'tm->when retrnsmt uid timeout inode\n ' + line)
def testDottedQuadFromHexString(self):
p = ident.ProcServerMixin()
self.assertEqual(p.dottedQuadFromHexString(_addr1), '127.0.0.1')
def testUnpackAddress(self):
p = ident.ProcServerMixin()
self.assertEqual(p.unpackAddress(_addr1 + ':0277'),
('127.0.0.1', 631))
def testLineParser(self):
p = ident.ProcServerMixin()
self.assertEqual(
p.parseLine(self.line),
(('127.0.0.1', 25), ('1.2.3.4', 762), 0))
def testExistingAddress(self):
username = []
p = ident.ProcServerMixin()
p.entries = lambda: iter([self.line])
p.getUsername = lambda uid: (username.append(uid), 'root')[1]
self.assertEqual(
p.lookup(('127.0.0.1', 25), ('1.2.3.4', 762)),
(p.SYSTEM_NAME, 'root'))
self.assertEqual(username, [0])
def testNonExistingAddress(self):
p = ident.ProcServerMixin()
p.entries = lambda: iter([self.line])
self.assertRaises(ident.NoUser, p.lookup, ('127.0.0.1', 26),
('1.2.3.4', 762))
self.assertRaises(ident.NoUser, p.lookup, ('127.0.0.1', 25),
('1.2.3.5', 762))
self.assertRaises(ident.NoUser, p.lookup, ('127.0.0.1', 25),
('1.2.3.4', 763))
def testLookupProcNetTcp(self):
"""
L{ident.ProcServerMixin.lookup} uses the Linux TCP process table.
"""
open_calls = []
def mocked_open(*args, **kwargs):
"""
Mock for the open call to prevent actually opening /proc/net/tcp.
"""
open_calls.append((args, kwargs))
return NativeStringIO(self.sampleFile)
self.patch(builtins, 'open', mocked_open)
p = ident.ProcServerMixin()
self.assertRaises(ident.NoUser, p.lookup, ('127.0.0.1', 26),
('1.2.3.4', 762))
self.assertEqual([(('/proc/net/tcp',), {})], open_calls)
| gpl-2.0 |
aaronsw/pytorctl | TorCtl/TorCtl.py | 1 | 64194 | #!/usr/bin/python
# TorCtl.py -- Python module to interface with Tor Control interface.
# Copyright 2005 Nick Mathewson
# Copyright 2007-2010 Mike Perry. See LICENSE file.
"""
Library to control Tor processes.
This library handles sending commands, parsing responses, and delivering
events to and from the control port. The basic usage is to create a
socket, wrap that in a TorCtl.Connection, and then add an EventHandler
to that connection. A simple example with a DebugEventHandler (that just
echoes the events back to stdout) is present in run_example().
Note that the TorCtl.Connection is fully compatible with the more
advanced EventHandlers in TorCtl.PathSupport (and of course any other
custom event handlers that you may extend off of those).
This package also contains a helper class for representing Routers, and
classes and constants for each event.
To quickly fetch a TorCtl instance to experiment with use the following:
>>> import TorCtl
>>> conn = TorCtl.connect()
>>> conn.get_info("version")["version"]
'0.2.1.24'
"""
__all__ = ["EVENT_TYPE", "connect", "TorCtlError", "TorCtlClosed",
"ProtocolError", "ErrorReply", "NetworkStatus", "ExitPolicyLine",
"Router", "RouterVersion", "Connection", "parse_ns_body",
"EventHandler", "DebugEventHandler", "NetworkStatusEvent",
"NewDescEvent", "CircuitEvent", "StreamEvent", "ORConnEvent",
"StreamBwEvent", "LogEvent", "AddrMapEvent", "BWEvent",
"BuildTimeoutSetEvent", "UnknownEvent", "ConsensusTracker",
"EventListener", "EVENT_STATE" ]
import os
import re
import struct
import sys
import threading
import Queue
import datetime
import traceback
import socket
import getpass
import binascii
import types
import time
import copy
from TorUtil import *
if sys.version_info < (2, 5):
from sets import Set as set
from sha import sha as sha1
else:
from hashlib import sha1
# Types of "EVENT" message.
EVENT_TYPE = Enum2(
CIRC="CIRC",
STREAM="STREAM",
ORCONN="ORCONN",
STREAM_BW="STREAM_BW",
BW="BW",
NS="NS",
NEWCONSENSUS="NEWCONSENSUS",
BUILDTIMEOUT_SET="BUILDTIMEOUT_SET",
GUARD="GUARD",
NEWDESC="NEWDESC",
ADDRMAP="ADDRMAP",
DEBUG="DEBUG",
INFO="INFO",
NOTICE="NOTICE",
WARN="WARN",
ERR="ERR")
EVENT_STATE = Enum2(
PRISTINE="PRISTINE",
PRELISTEN="PRELISTEN",
HEARTBEAT="HEARTBEAT",
HANDLING="HANDLING",
POSTLISTEN="POSTLISTEN",
DONE="DONE")
# Types of control port authentication
AUTH_TYPE = Enum2(
NONE="NONE",
PASSWORD="PASSWORD",
COOKIE="COOKIE")
INCORRECT_PASSWORD_MSG = "Provided passphrase was incorrect"
def connect(controlAddr="127.0.0.1", controlPort=9051, passphrase=None):
"""
Convenience function for quickly getting a TorCtl connection. This is very
handy for debugging or CLI setup, prompting for a password if necessary
(that is, if none is provided or the given one fails). If any issue
arises this prints a description of the problem and returns None.
Arguments:
controlAddr - ip address belonging to the controller
controlPort - port belonging to the controller
passphrase - authentication passphrase (if defined this is used rather
than prompting the user)
"""
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((controlAddr, controlPort))
conn = Connection(s)
authType, authValue = conn.get_auth_type(), ""
if authType == AUTH_TYPE.PASSWORD:
# password authentication, prompting for the password if it wasn't provided
if passphrase: authValue = passphrase
else:
try: authValue = getpass.getpass()
except KeyboardInterrupt: return None
elif authType == AUTH_TYPE.COOKIE:
authValue = conn.get_auth_cookie_path()
conn.authenticate(authValue)
return conn
except socket.error, exc:
if "Connection refused" in exc.args:
# most common case - tor control port isn't available
print "Connection refused. Is the ControlPort enabled?"
else: print "Failed to establish socket: %s" % exc
return None
except Exception, exc:
if passphrase and str(exc) == "Unable to authenticate: password incorrect":
# provide a warning that the provided password didn't work, then try
# again prompting for the user to enter it
print INCORRECT_PASSWORD_MSG
return connect(controlAddr, controlPort)
else:
print exc
return None
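# Usage sketch (port and passphrase are illustrative):
# conn = connect(controlPort=9051, passphrase="example")
# if conn:
#   print conn.get_info("version")["version"]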
class TorCtlError(Exception):
"Generic error raised by TorControl code."
pass
class TorCtlClosed(TorCtlError):
"Raised when the controller connection is closed by Tor (not by us.)"
pass
class ProtocolError(TorCtlError):
"Raised on violations in Tor controller protocol"
pass
class ErrorReply(TorCtlError):
"Raised when Tor controller returns an error"
def __init__(self, *args, **kwargs):
if "status" in kwargs:
self.status = kwargs.pop("status")
if "message" in kwargs:
self.message = kwargs.pop("message")
TorCtlError.__init__(self, *args, **kwargs)
class NetworkStatus:
"Filled in during NS events"
def __init__(self, nickname, idhash, orhash, updated, ip, orport, dirport, flags, bandwidth=None):
self.nickname = nickname
self.idhash = idhash
self.orhash = orhash
self.ip = ip
self.orport = int(orport)
self.dirport = int(dirport)
self.flags = flags
self.idhex = (self.idhash + "=").decode("base64").encode("hex").upper()
self.bandwidth = bandwidth
m = re.search(r"(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)", updated)
self.updated = datetime.datetime(*map(int, m.groups()))
class Event:
def __init__(self, event_name):
self.event_name = event_name
self.arrived_at = 0
self.state = EVENT_STATE.PRISTINE
class TimerEvent(Event):
def __init__(self, event_name, type):
Event.__init__(self, event_name)
self.type = type
class NetworkStatusEvent(Event):
def __init__(self, event_name, nslist):
Event.__init__(self, event_name)
self.nslist = nslist # List of NetworkStatus objects
class NewConsensusEvent(NetworkStatusEvent):
pass
class NewDescEvent(Event):
def __init__(self, event_name, idlist):
Event.__init__(self, event_name)
self.idlist = idlist
class GuardEvent(Event):
def __init__(self, event_name, ev_type, guard, status):
Event.__init__(self, event_name)
if "~" in guard:
(self.idhex, self.nick) = guard[1:].split("~")
elif "=" in guard:
(self.idhex, self.nick) = guard[1:].split("=")
else:
self.idhex = guard[1:]
self.nick = None
self.status = status
class BuildTimeoutSetEvent(Event):
def __init__(self, event_name, set_type, total_times, timeout_ms, xm, alpha,
quantile):
Event.__init__(self, event_name)
self.set_type = set_type
self.total_times = total_times
self.timeout_ms = timeout_ms
self.xm = xm
self.alpha = alpha
self.cutoff_quantile = quantile
class CircuitEvent(Event):
def __init__(self, event_name, circ_id, status, path, purpose,
reason, remote_reason):
Event.__init__(self, event_name)
self.circ_id = circ_id
self.status = status
self.path = path
self.purpose = purpose
self.reason = reason
self.remote_reason = remote_reason
class StreamEvent(Event):
def __init__(self, event_name, strm_id, status, circ_id, target_host,
target_port, reason, remote_reason, source, source_addr, purpose):
Event.__init__(self, event_name)
self.strm_id = strm_id
self.status = status
self.circ_id = circ_id
self.target_host = target_host
self.target_port = int(target_port)
self.reason = reason
self.remote_reason = remote_reason
self.source = source
self.source_addr = source_addr
self.purpose = purpose
class ORConnEvent(Event):
def __init__(self, event_name, status, endpoint, age, read_bytes,
wrote_bytes, reason, ncircs):
Event.__init__(self, event_name)
self.status = status
self.endpoint = endpoint
self.age = age
self.read_bytes = read_bytes
self.wrote_bytes = wrote_bytes
self.reason = reason
self.ncircs = ncircs
class StreamBwEvent(Event):
def __init__(self, event_name, strm_id, written, read):
Event.__init__(self, event_name)
self.strm_id = int(strm_id)
self.bytes_read = int(read)
self.bytes_written = int(written)
class LogEvent(Event):
def __init__(self, level, msg):
Event.__init__(self, level)
self.level = level
self.msg = msg
class AddrMapEvent(Event):
def __init__(self, event_name, from_addr, to_addr, when):
Event.__init__(self, event_name)
self.from_addr = from_addr
self.to_addr = to_addr
self.when = when
class AddrMap:
def __init__(self, from_addr, to_addr, when):
self.from_addr = from_addr
self.to_addr = to_addr
self.when = when
class BWEvent(Event):
def __init__(self, event_name, read, written):
Event.__init__(self, event_name)
self.read = read
self.written = written
class UnknownEvent(Event):
def __init__(self, event_name, event_string):
Event.__init__(self, event_name)
self.event_string = event_string
ipaddress_re = re.compile(r"(\d{1,3}\.){3}\d{1,3}$")
class ExitPolicyLine:
""" Class to represent a line in a Router's exit policy in a way
that can be easily checked. """
def __init__(self, match, ip_mask, port_low, port_high):
self.match = match
if ip_mask == "*":
self.ip = 0
self.netmask = 0
else:
if not "/" in ip_mask:
self.netmask = 0xFFFFFFFF
ip = ip_mask
else:
ip, mask = ip_mask.split("/")
if ipaddress_re.match(mask):
self.netmask=struct.unpack(">I", socket.inet_aton(mask))[0]
else:
self.netmask = 0xffffffff ^ (0xffffffff >> int(mask))
self.ip = struct.unpack(">I", socket.inet_aton(ip))[0]
self.ip &= self.netmask
if port_low == "*":
self.port_low,self.port_high = (0,65535)
else:
if not port_high:
port_high = port_low
self.port_low = int(port_low)
self.port_high = int(port_high)
def check(self, ip, port):
"""Check to see if an ip and port is matched by this line.
Returns true if the line is an Accept, and False if it is a Reject. """
ip = struct.unpack(">I", socket.inet_aton(ip))[0]
if (ip & self.netmask) == self.ip:
if self.port_low <= port and port <= self.port_high:
return self.match
return -1
def __str__(self):
retr = ""
if self.match:
retr += "accept "
else:
retr += "reject "
retr += socket.inet_ntoa(struct.pack(">I",self.ip)) + "/"
retr += socket.inet_ntoa(struct.pack(">I",self.netmask)) + ":"
retr += str(self.port_low)+"-"+str(self.port_high)
return retr
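# Illustration (addresses are made up): the equivalent of an
# "accept 18.0.0.0/8:80" policy line.
# line = ExitPolicyLine(True, "18.0.0.0/8", "80", None)
# line.check("18.244.0.188", 80)  # -> True (accepted)
# line.check("128.31.0.34", 80)   # -> -1 (no opinion; consult the next line)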
class RouterVersion:
""" Represents a Router's version. Overloads all comparison operators
to check for newer, older, or equivalent versions. """
def __init__(self, version):
if version:
v = re.search("^(\d+)\.(\d+)\.(\d+)\.(\d+)", version).groups()
self.version = int(v[0])*0x1000000 + int(v[1])*0x10000 + int(v[2])*0x100 + int(v[3])
self.ver_string = version
else:
self.version = version
self.ver_string = "unknown"
def __lt__(self, other): return self.version < other.version
def __gt__(self, other): return self.version > other.version
def __ge__(self, other): return self.version >= other.version
def __le__(self, other): return self.version <= other.version
def __eq__(self, other): return self.version == other.version
def __ne__(self, other): return self.version != other.version
def __str__(self): return self.ver_string
# map descriptor keywords to regular expressions.
desc_re = {
"router": r"(\S+) (\S+)",
"opt fingerprint": r"(.+).*on (\S+)",
"opt extra-info-digest": r"(\S+)",
"opt hibernating": r"1$",
"platform": r"Tor (\S+).*on ([\S\s]+)",
"accept": r"(\S+):([^-]+)(?:-(\d+))?",
"reject": r"(\S+):([^-]+)(?:-(\d+))?",
"bandwidth": r"(\d+) \d+ (\d+)",
"uptime": r"(\d+)",
"contact": r"(.+)",
"published": r"(\S+ \S+)",
}
# Compile each regular expression now.
for kw, reg in desc_re.iteritems():
desc_re[kw] = re.compile(reg)
def partition(string, delimiter):
""" Implementation of string.partition-like function for Python <
2.5. Returns a tuple (first, rest), where first is the text up to
the first delimiter, and rest is the text after the first delimiter.
"""
sp = string.split(delimiter, 1)
if len(sp) > 1:
return sp[0], sp[1]
else:
return sp[0], ""
class Router:
"""
Class to represent a router from a descriptor. Can either be
created from the parsed fields, or can be built from a
descriptor+NetworkStatus
"""
def __init__(self, *args):
if len(args) == 1:
for i in args[0].__dict__:
self.__dict__[i] = copy.deepcopy(args[0].__dict__[i])
return
else:
(idhex, name, bw, down, exitpolicy, flags, ip, version, os, uptime,
published, contact, rate_limited, orhash,
ns_bandwidth,extra_info_digest) = args
self.idhex = idhex
self.nickname = name
if ns_bandwidth != None:
self.bw = ns_bandwidth
else:
self.bw = bw
self.desc_bw = bw
self.exitpolicy = exitpolicy
self.flags = flags # Technically from the NS doc
self.down = down
self.ip = struct.unpack(">I", socket.inet_aton(ip))[0]
self.version = RouterVersion(version)
self.os = os
self.list_rank = 0 # position in a sorted list of routers.
self.uptime = uptime
self.published = published
self.refcount = 0 # How many open circs are we currently in?
self.deleted = False # Has Tor already deleted this descriptor?
self.contact = contact
self.rate_limited = rate_limited
self.orhash = orhash
self.extra_info_digest = extra_info_digest
self._generated = [] # For ExactUniformGenerator
def __str__(self):
s = self.idhex, self.nickname
return s.__str__()
def build_from_desc(desc, ns):
"""
Static method of Router that parses a descriptor string into this class.
'desc' is a full descriptor as a string.
'ns' is a TorCtl.NetworkStatus instance for this router (needed for
the flags, the nickname, and the idhex string).
Returns a Router instance.
"""
exitpolicy = []
dead = not ("Running" in ns.flags)
bw_observed = 0
version = None
os = None
uptime = 0
ip = 0
router = "[none]"
published = "never"
contact = None
extra_info_digest = None
for line in desc:
# Pull off the keyword...
kw, rest = partition(line, " ")
# ...and if it's "opt", extend it by the next keyword
# so we get "opt hibernating" as one keyword.
if kw == "opt":
okw, rest = partition(rest, " ")
kw += " " + okw
# try to match the descriptor line by keyword.
try:
match = desc_re[kw].match(rest)
# if we don't handle this keyword, just move on to the next one.
except KeyError:
continue
# if we do handle this keyword but its data is malformed,
# move on to the next one without processing it.
if not match:
continue
g = match.groups()
# Handle each keyword individually.
# TODO: This could possibly be sped up since we technically already
# did the compare with the dictionary lookup... lambda magic time.
if kw == "accept":
exitpolicy.append(ExitPolicyLine(True, *g))
elif kw == "reject":
exitpolicy.append(ExitPolicyLine(False, *g))
elif kw == "router":
router,ip = g
elif kw == "bandwidth":
bws = map(int, g)
bw_observed = min(bws)
rate_limited = False
if bws[0] < bws[1]:
rate_limited = True
elif kw == "platform":
version, os = g
elif kw == "uptime":
uptime = int(g[0])
elif kw == "published":
t = time.strptime(g[0] + " UTC", "20%y-%m-%d %H:%M:%S %Z")
published = datetime.datetime(*t[0:6])
elif kw == "contact":
contact = g[0]
elif kw == "opt extra-info-digest":
extra_info_digest = g[0]
elif kw == "opt hibernating":
dead = True
if ("Running" in ns.flags):
plog("INFO", "Hibernating router "+ns.nickname+" is running, flags: "+" ".join(ns.flags))
if router != ns.nickname:
plog("INFO", "Got different names " + ns.nickname + " vs " +
router + " for " + ns.idhex)
if not bw_observed and not dead and ("Valid" in ns.flags):
plog("INFO", "No bandwidth for live router "+ns.nickname+", flags: "+" ".join(ns.flags))
dead = True
if not version or not os:
plog("INFO", "No version and/or OS for router " + ns.nickname)
return Router(ns.idhex, ns.nickname, bw_observed, dead, exitpolicy,
ns.flags, ip, version, os, uptime, published, contact, rate_limited,
ns.orhash, ns.bandwidth, extra_info_digest)
build_from_desc = Callable(build_from_desc)
def update_to(self, new):
""" Somewhat hackish method to update this router to be a copy of
'new' """
if self.idhex != new.idhex:
plog("ERROR", "Update of router "+self.nickname+"changes idhex!")
for i in new.__dict__.iterkeys():
if i == "refcount" or i == "_generated": continue
self.__dict__[i] = new.__dict__[i]
def will_exit_to(self, ip, port):
""" Check the entire exitpolicy to see if the router will allow
connections to 'ip':'port' """
for line in self.exitpolicy:
ret = line.check(ip, port)
if ret != -1:
return ret
plog("WARN", "No matching exit line for "+self.nickname)
return False
class Connection:
"""A Connection represents a connection to the Tor process via the
control port."""
def __init__(self, sock):
"""Create a Connection to communicate with the Tor process over the
socket 'sock'.
"""
self._handler = None
self._handleFn = None
self._sendLock = threading.RLock()
self._queue = Queue.Queue()
self._thread = None
self._closedEx = None
self._closed = 0
self._closeHandler = None
self._eventThread = None
self._eventQueue = Queue.Queue()
self._s = BufSock(sock)
self._debugFile = None
# authentication information (lazily fetched so None if still unknown)
self._authType = None
self._cookiePath = None
def get_auth_type(self):
"""
Provides the authentication type used for the control port (a member of
the AUTH_TYPE enumeration). This raises an IOError if querying
PROTOCOLINFO fails.
"""
if self._authType: return self._authType
else:
# check PROTOCOLINFO for authentication type
try:
authInfo = self.sendAndRecv("PROTOCOLINFO\r\n")[1][1]
except ErrorReply, exc:
raise IOError("Unable to query PROTOCOLINFO for the authentication type: %s" % exc)
authType, cookiePath = None, None
if authInfo.startswith("AUTH METHODS=NULL"):
# no authentication required
authType = AUTH_TYPE.NONE
elif authInfo.startswith("AUTH METHODS=HASHEDPASSWORD"):
# password authentication
authType = AUTH_TYPE.PASSWORD
elif authInfo.startswith("AUTH METHODS=COOKIE"):
# cookie authentication, parses authentication cookie path
authType = AUTH_TYPE.COOKIE
start = authInfo.find("COOKIEFILE=\"") + 12
end = authInfo.find("\"", start)
cookiePath = authInfo[start:end]
else:
# not of a recognized authentication type (new addition to the
# control-spec?)
raise IOError("Unrecognized authentication type: %s" % authInfo)
self._authType = authType
self._cookiePath = cookiePath
return self._authType
def get_auth_cookie_path(self):
"""
Provides the path of tor's authentication cookie. If the connection isn't
using cookie authentication then this provides None. This raises an IOError
if PROTOCOLINFO can't be queried.
"""
# fetches authentication type and cookie path if still unloaded
if self._authType == None: self.get_auth_type()
if self._authType == AUTH_TYPE.COOKIE:
return self._cookiePath
else:
return None
def set_close_handler(self, handler):
"""Call 'handler' when the Tor process has closed its connection or
given us an exception. If we close normally, no arguments are
provided; otherwise, it will be called with an exception as its
argument.
"""
self._closeHandler = handler
def close(self):
"""Shut down this controller connection"""
self._sendLock.acquire()
try:
self._queue.put("CLOSE")
self._eventQueue.put((time.time(), "CLOSE"))
self._closed = 1
# XXX: For some reason, this does not cause the readline in
# self._read_reply() to return immediately. The _loop() thread
# thus tends to stick around until some event causes data to come
# back...
self._s.close()
self._eventThread.join()
finally:
self._sendLock.release()
def is_live(self):
""" Returns true iff the connection is alive and healthy"""
return self._thread.isAlive() and self._eventThread.isAlive() and not \
self._closed
def launch_thread(self, daemon=1):
"""Launch a background thread to handle messages from the Tor process."""
assert self._thread is None
t = threading.Thread(target=self._loop, name="TorLoop")
if daemon:
t.setDaemon(daemon)
t.start()
self._thread = t
t = threading.Thread(target=self._eventLoop, name="EventLoop")
if daemon:
t.setDaemon(daemon)
t.start()
self._eventThread = t
# eventThread provides a more reliable indication of when we are done.
# The _loop thread won't always die when self.close() is called.
return self._eventThread
def _loop(self):
"""Main subthread loop: Read commands from Tor, and handle them either
as events or as responses to other commands.
"""
while 1:
try:
isEvent, reply = self._read_reply()
except TorCtlClosed:
plog("NOTICE", "Tor closed control connection. Exiting event thread.")
return
except Exception,e:
if not self._closed:
if sys:
self._err(sys.exc_info())
else:
plog("NOTICE", "No sys left at exception shutdown: "+str(e))
self._err((e.__class__, e, None))
return
else:
isEvent = 0
if isEvent:
if self._handler is not None:
self._eventQueue.put((time.time(), reply))
else:
cb = self._queue.get() # atomic..
if cb == "CLOSE":
self._s = None
plog("INFO", "Closed control connection. Exiting thread.")
return
else:
cb(reply)
def _err(self, (tp, ex, tb), fromEventLoop=0):
"""DOCDOC"""
# silent death is bad :(
traceback.print_exception(tp, ex, tb)
if self._s:
try:
self.close()
except:
pass
self._sendLock.acquire()
try:
self._closedEx = ex
self._closed = 1
finally:
self._sendLock.release()
while 1:
try:
cb = self._queue.get(timeout=0)
if cb != "CLOSE":
cb("EXCEPTION")
except Queue.Empty:
break
if self._closeHandler is not None:
self._closeHandler(ex)
# I hate you for making me resort to this, python
os.kill(os.getpid(), 15)
return
def _eventLoop(self):
"""DOCDOC"""
while 1:
(timestamp, reply) = self._eventQueue.get()
if reply[0][0] == "650" and reply[0][1] == "OK":
plog("DEBUG", "Ignoring incompatible syntactic sugar: 650 OK")
continue
if reply == "CLOSE":
plog("INFO", "Event loop received close message.")
return
try:
self._handleFn(timestamp, reply)
except:
for code, msg, data in reply:
plog("WARN", "No event for: "+str(code)+" "+str(msg))
self._err(sys.exc_info(), 1)
return
def _sendImpl(self, sendFn, msg):
"""DOCDOC"""
if self._thread is None and not self._closed:
self.launch_thread(1)
# This condition will get notified when we've got a result...
condition = threading.Condition()
# Here's where the result goes...
result = []
if self._closedEx is not None:
raise self._closedEx
elif self._closed:
raise TorCtlClosed()
def cb(reply,condition=condition,result=result):
condition.acquire()
try:
result.append(reply)
condition.notify()
finally:
condition.release()
# Sends a message to Tor...
self._sendLock.acquire() # ensure queue+sendmsg is atomic
try:
self._queue.put(cb)
sendFn(msg) # _doSend(msg)
finally:
self._sendLock.release()
# Now wait till the answer is in...
condition.acquire()
try:
while not result:
condition.wait()
finally:
condition.release()
# ...And handle the answer appropriately.
assert len(result) == 1
reply = result[0]
if reply == "EXCEPTION":
raise self._closedEx
return reply
def debug(self, f):
"""DOCDOC"""
self._debugFile = f
def set_event_handler(self, handler):
"""Cause future events from the Tor process to be sent to 'handler'.
"""
if self._handler:
handler.pre_listeners = self._handler.pre_listeners
handler.post_listeners = self._handler.post_listeners
self._handler = handler
self._handler.c = self
self._handleFn = handler._handle1
def add_event_listener(self, listener):
if not self._handler:
self.set_event_handler(EventHandler())
self._handler.add_event_listener(listener)
def block_until_close(self):
""" Blocks until the connection to the Tor process is interrupted"""
return self._eventThread.join()
def _read_reply(self):
lines = []
while 1:
line = self._s.readline()
if not line:
self._closed = True
raise TorCtlClosed()
line = line.strip()
if self._debugFile:
self._debugFile.write(str(time.time())+"\t %s\n" % line)
if len(line)<4:
raise ProtocolError("Badly formatted reply line: Too short")
code = line[:3]
tp = line[3]
s = line[4:]
if tp == "-":
lines.append((code, s, None))
elif tp == " ":
lines.append((code, s, None))
isEvent = (lines and lines[0][0][0] == '6')
return isEvent, lines
elif tp != "+":
raise ProtocolError("Badly formatted reply line: unknown type %r"%tp)
else:
more = []
while 1:
line = self._s.readline()
if self._debugFile:
self._debugFile.write("+++ %s" % line)
if line in (".\r\n", ".\n", "650 OK\n", "650 OK\r\n"):
break
more.append(line)
lines.append((code, s, unescape_dots("".join(more))))
isEvent = (lines and lines[0][0][0] == '6')
if isEvent: # Need "250 OK" if it's not an event. Otherwise, end
return (isEvent, lines)
# Notreached
raise TorCtlError()
def _doSend(self, msg):
if self._debugFile:
amsg = msg
lines = amsg.split("\n")
if len(lines) > 2:
amsg = "\n".join(lines[:2]) + "\n"
self._debugFile.write(str(time.time())+"\t>>> "+amsg)
self._s.write(msg)
def set_timer(self, in_seconds, type=None):
event = (("650", "TORCTL_TIMER", type),)
threading.Timer(in_seconds, lambda:
self._eventQueue.put((time.time(), event))).start()
def set_periodic_timer(self, every_seconds, type=None):
event = (("650", "TORCTL_TIMER", type),)
def notlambda():
plog("DEBUG", "Timer fired for type "+str(type))
self._eventQueue.put((time.time(), event))
threading.Timer(every_seconds, notlambda).start()
threading.Timer(every_seconds, notlambda).start()
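# Usage sketch (the tag string is illustrative): both timers deliver a
# TORCTL_TIMER event to the registered handler's timer_event().
# conn.set_timer(5.0, type="heartbeat")            # fires once, ~5s from now
# conn.set_periodic_timer(60.0, type="heartbeat")  # fires roughly every minute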
def sendAndRecv(self, msg="", expectedTypes=("250", "251")):
"""Helper: Send a command 'msg' to Tor, and wait for a command
in response. If the response type is in expectedTypes,
return a list of (tp,body,extra) tuples. If it is an
error, raise ErrorReply. Otherwise, raise ProtocolError.
"""
if type(msg) == types.ListType:
msg = "".join(msg)
assert msg.endswith("\r\n")
lines = self._sendImpl(self._doSend, msg)
# print lines
for tp, msg, _ in lines:
if tp[0] in '45':
code = int(tp[:3])
raise ErrorReply("%s %s"%(tp, msg), status = code, message = msg)
if tp not in expectedTypes:
raise ProtocolError("Unexpectd message type %r"%tp)
return lines
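# Illustration (the version string is made up):
# conn.sendAndRecv("GETINFO version\r\n")
# -> [('250', 'version=0.2.1.24', None), ('250', 'OK', None)]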
def authenticate(self, secret=""):
"""
Authenticates to the control port. If an issue arises this raises either of
the following:
- IOError for failures in reading an authentication cookie or querying
PROTOCOLINFO.
- TorCtl.ErrorReply for authentication failures or if the secret is
undefined when using password authentication
"""
# fetches authentication type and cookie path if still unloaded
if self._authType == None: self.get_auth_type()
# validates input
if self._authType == AUTH_TYPE.PASSWORD and secret == "":
raise ErrorReply("Unable to authenticate: no passphrase provided")
authCookie = None
try:
if self._authType == AUTH_TYPE.NONE:
self.authenticate_password("")
elif self._authType == AUTH_TYPE.PASSWORD:
self.authenticate_password(secret)
else:
authCookie = open(self._cookiePath, "r")
self.authenticate_cookie(authCookie)
authCookie.close()
except ErrorReply, exc:
if authCookie: authCookie.close()
issue = str(exc)
# simplifies message if the wrong credentials were provided (common
# mistake)
if issue.startswith("515 Authentication failed: "):
if issue[27:].startswith("Password did not match"):
issue = "password incorrect"
elif issue[27:] == "Wrong length on authentication cookie.":
issue = "cookie value incorrect"
raise ErrorReply("Unable to authenticate: %s" % issue)
except IOError, exc:
if authCookie: authCookie.close()
issue = None
# cleaner message for common errors
if str(exc).startswith("[Errno 13] Permission denied"):
issue = "permission denied"
elif str(exc).startswith("[Errno 2] No such file or directory"):
issue = "file doesn't exist"
# if problem's recognized give concise message, otherwise print exception
# string
if issue: raise IOError("Failed to read authentication cookie (%s): %s" % (issue, self._cookiePath))
else: raise IOError("Failed to read authentication cookie: %s" % exc)
def authenticate_password(self, secret=""):
"""Sends an authenticating secret (password) to Tor. You'll need to call
this method (or authenticate_cookie) before Tor can start.
"""
#hexstr = binascii.b2a_hex(secret)
self.sendAndRecv("AUTHENTICATE \"%s\"\r\n"%secret)
def authenticate_cookie(self, cookie):
"""Sends an authentication cookie to Tor. This may either be a file or
its contents.
"""
# read contents if provided a file
if type(cookie) == file: cookie = cookie.read()
# unlike passwords, the cookie contents aren't enclosed in quotes
self.sendAndRecv("AUTHENTICATE %s\r\n" % binascii.b2a_hex(cookie))
def get_option(self, name):
"""Get the value of the configuration option named 'name'. To
retrieve multiple values, pass a list for 'name' instead of
a string. Returns a list of (key,value) pairs.
Refer to section 3.3 of control-spec.txt for a list of valid names.
"""
if not isinstance(name, str):
name = " ".join(name)
lines = self.sendAndRecv("GETCONF %s\r\n" % name)
r = []
for _,line,_ in lines:
try:
key, val = line.split("=", 1)
r.append((key,val))
except ValueError:
r.append((line, None))
return r
def set_option(self, key, value):
"""Set the value of the configuration option 'key' to the value 'value'.
"""
self.set_options([(key, value)])
def set_options(self, kvlist):
"""Given a list of (key,value) pairs, set them as configuration
options.
"""
if not kvlist:
return
msg = " ".join(["%s=\"%s\""%(k,quote(v)) for k,v in kvlist])
self.sendAndRecv("SETCONF %s\r\n"%msg)
def reset_options(self, keylist):
"""Reset the options listed in 'keylist' to their default values.
Tor started implementing this command in version 0.1.1.7-alpha;
previous versions wanted you to set configuration keys to "".
That no longer works.
"""
self.sendAndRecv("RESETCONF %s\r\n"%(" ".join(keylist)))
def get_consensus(self):
"""Get the pristine Tor Consensus. Returns a list of
TorCtl.NetworkStatus instances."""
return parse_ns_body(self.sendAndRecv("GETINFO dir/status-vote/current/consensus\r\n")[0][2])
def get_network_status(self, who="all"):
"""Get the entire network status list. Returns a list of
TorCtl.NetworkStatus instances."""
return parse_ns_body(self.sendAndRecv("GETINFO ns/"+who+"\r\n")[0][2])
def get_address_mappings(self, type="all"):
# TODO: Also parse errors and GMTExpiry
body = self.sendAndRecv("GETINFO address-mappings/"+type+"\r\n")
#print "|"+body[0][1].replace("address-mappings/"+type+"=", "")+"|"
#print str(body[0])
if body[0][1].replace("address-mappings/"+type+"=", "") != "":
# one line
lines = [body[0][1].replace("address-mappings/"+type+"=", "")]
elif not body[0][2]:
return []
else:
lines = body[0][2].split("\n")
if not lines: return []
ret = []
for l in lines:
#print "|"+str(l)+"|"
if len(l) == 0: continue #Skip last line.. it's empty
m = re.match(r'(\S+)\s+(\S+)\s+(\"[^"]+\"|\w+)', l)
if not m:
raise ProtocolError("ADDRMAP response misformatted.")
fromaddr, toaddr, when = m.groups()
if when.upper() == "NEVER":
when = None
else:
when = time.strptime(when[1:-1], "%Y-%m-%d %H:%M:%S")
ret.append(AddrMap(fromaddr, toaddr, when))
return ret
def get_router(self, ns):
"""Fill in a Router class corresponding to a given NS class"""
desc = self.sendAndRecv("GETINFO desc/id/" + ns.idhex + "\r\n")[0][2]
sig_start = desc.find("\nrouter-signature\n")+len("\nrouter-signature\n")
fp_base64 = sha1(desc[:sig_start]).digest().encode("base64")[:-2]
r = Router.build_from_desc(desc.split("\n"), ns)
if fp_base64 != ns.orhash:
plog("INFO", "Router descriptor for "+ns.idhex+" does not match ns fingerprint (NS @ "+str(ns.updated)+" vs Desc @ "+str(r.published)+")")
return None
else:
return r
def read_routers(self, nslist):
""" Given a list a NetworkStatuses in 'nslist', this function will
return a list of new Router instances.
"""
bad_key = 0
new = []
for ns in nslist:
try:
r = self.get_router(ns)
if r:
new.append(r)
except ErrorReply:
bad_key += 1
if "Running" in ns.flags:
plog("NOTICE", "Running router "+ns.nickname+"="
+ns.idhex+" has no descriptor")
return new
def get_info(self, name):
"""Return the value of the internal information field named 'name'.
Refer to section 3.9 of control-spec.txt for a list of valid names.
Multiple fields may be requested by passing a sequence of names;
the result is a dict mapping each requested name to its value.
"""
if not isinstance(name, str):
name = " ".join(name)
lines = self.sendAndRecv("GETINFO %s\r\n"%name)
d = {}
for _,msg,more in lines:
if msg == "OK":
break
try:
k,rest = msg.split("=",1)
except ValueError:
raise ProtocolError("Bad info line %r",msg)
if more:
d[k] = more
else:
d[k] = rest
return d
def set_events(self, events, extended=False):
"""Change the list of events that the event handler is interested
in to those in 'events', which is a list of event names.
Recognized event names are listed in section 3.3 of the control-spec
"""
if extended:
plog ("DEBUG", "SETEVENTS EXTENDED %s\r\n" % " ".join(events))
self.sendAndRecv("SETEVENTS EXTENDED %s\r\n" % " ".join(events))
else:
self.sendAndRecv("SETEVENTS %s\r\n" % " ".join(events))
def save_conf(self):
"""Flush all configuration changes to disk.
"""
self.sendAndRecv("SAVECONF\r\n")
def send_signal(self, sig):
"""Send the signal 'sig' to the Tor process; The allowed values for
'sig' are listed in section 3.6 of control-spec.
"""
sig = { 0x01 : "HUP",
0x02 : "INT",
0x03 : "NEWNYM",
0x0A : "USR1",
0x0C : "USR2",
0x0F : "TERM" }.get(sig,sig)
self.sendAndRecv("SIGNAL %s\r\n"%sig)
def resolve(self, host):
""" Launch a remote hostname lookup request:
'host' may be a hostname or IPv4 address
"""
# TODO: handle "mode=reverse"
self.sendAndRecv("RESOLVE %s\r\n"%host)
def map_address(self, kvList):
""" Sends the MAPADDRESS command for each of the tuples in kvList """
if not kvList:
return
m = " ".join([ "%s=%s" for k,v in kvList])
lines = self.sendAndRecv("MAPADDRESS %s\r\n"%m)
r = []
for _,line,_ in lines:
try:
key, val = line.split("=", 1)
except ValueError:
raise ProtocolError("Bad address line %r",v)
r.append((key,val))
return r
def extend_circuit(self, circid=None, hops=None):
"""Tell Tor to extend the circuit identified by 'circid' through the
servers named in the list 'hops'.
"""
if circid is None:
circid = 0
if hops is None:
hops = ""
plog("DEBUG", "Extending circuit")
lines = self.sendAndRecv("EXTENDCIRCUIT %d %s\r\n"
%(circid, ",".join(hops)))
tp,msg,_ = lines[0]
m = re.match(r'EXTENDED (\S*)', msg)
if not m:
raise ProtocolError("Bad extended line %r",msg)
plog("DEBUG", "Circuit extended")
return int(m.group(1))
def redirect_stream(self, streamid, newaddr, newport=""):
"""DOCDOC"""
if newport:
self.sendAndRecv("REDIRECTSTREAM %d %s %s\r\n"%(streamid, newaddr, newport))
else:
self.sendAndRecv("REDIRECTSTREAM %d %s\r\n"%(streamid, newaddr))
def attach_stream(self, streamid, circid, hop=None):
"""Attach a stream to a circuit, specify both by IDs. If hop is given,
try to use the specified hop in the circuit as the exit node for
this stream.
"""
if hop:
self.sendAndRecv("ATTACHSTREAM %d %d HOP=%d\r\n"%(streamid, circid, hop))
plog("DEBUG", "Attaching stream: "+str(streamid)+" to hop "+str(hop)+" of circuit "+str(circid))
else:
self.sendAndRecv("ATTACHSTREAM %d %d\r\n"%(streamid, circid))
plog("DEBUG", "Attaching stream: "+str(streamid)+" to circuit "+str(circid))
def close_stream(self, streamid, reason=0, flags=()):
"""DOCDOC"""
self.sendAndRecv("CLOSESTREAM %d %s %s\r\n"
%(streamid, reason, "".join(flags)))
def close_circuit(self, circid, reason=0, flags=()):
"""DOCDOC"""
self.sendAndRecv("CLOSECIRCUIT %d %s %s\r\n"
%(circid, reason, "".join(flags)))
def post_descriptor(self, desc):
self.sendAndRecv("+POSTDESCRIPTOR purpose=controller\r\n%s"%escape_dots(desc))
def parse_ns_body(data):
"""Parse the body of an NS event or command into a list of
NetworkStatus instances"""
if not data: return []
nsgroups = re.compile(r"^r ", re.M).split(data)
nsgroups.pop(0)
nslist = []
for nsline in nsgroups:
m = re.search(r"^s((?:[ ]\S*)+)", nsline, re.M)
flags = m.groups()
flags = flags[0].strip().split(" ")
m = re.match(r"(\S+)\s(\S+)\s(\S+)\s(\S+\s\S+)\s(\S+)\s(\d+)\s(\d+)", nsline)
w = re.search(r"^w Bandwidth=(\d+)", nsline, re.M)
if w:
nslist.append(NetworkStatus(*(m.groups()+(flags,)+(int(w.group(1))*1000,))))
else:
nslist.append(NetworkStatus(*(m.groups() + (flags,))))
return nslist
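# Illustration (values are made up): parse_ns_body() consumes
# consensus-style entries of roughly this shape:
# r moria1 <id-b64> <digest-b64> 2010-01-01 12:00:00 128.31.0.34 9101 9131
# s Authority Fast Running Stable V2Dir Valid
# w Bandwidth=9500
# and yields one NetworkStatus per "r" group; when a "w Bandwidth=" line
# is present, the advertised value is multiplied by 1000 before storage.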
class EventSink:
def heartbeat_event(self, event): pass
def unknown_event(self, event): pass
def circ_status_event(self, event): pass
def stream_status_event(self, event): pass
def stream_bw_event(self, event): pass
def or_conn_status_event(self, event): pass
def bandwidth_event(self, event): pass
def new_desc_event(self, event): pass
def msg_event(self, event): pass
def ns_event(self, event): pass
def new_consensus_event(self, event): pass
def buildtimeout_set_event(self, event): pass
def guard_event(self, event): pass
def address_mapped_event(self, event): pass
def timer_event(self, event): pass
class EventListener(EventSink):
"""An 'EventListener' is a passive sink for parsed Tor events. It
implements the same interface as EventHandler, but it should
not alter Tor's behavior as a result of these events.
Do not extend from this class. Instead, extend from one of
Pre, Post, or Dual event listener, to get events
before, after, or before and after the EventHandler handles them.
"""
def __init__(self):
"""Create a new EventHandler."""
self._map1 = {
"CIRC" : self.circ_status_event,
"STREAM" : self.stream_status_event,
"ORCONN" : self.or_conn_status_event,
"STREAM_BW" : self.stream_bw_event,
"BW" : self.bandwidth_event,
"DEBUG" : self.msg_event,
"INFO" : self.msg_event,
"NOTICE" : self.msg_event,
"WARN" : self.msg_event,
"ERR" : self.msg_event,
"NEWDESC" : self.new_desc_event,
"ADDRMAP" : self.address_mapped_event,
"NS" : self.ns_event,
"NEWCONSENSUS" : self.new_consensus_event,
"BUILDTIMEOUT_SET" : self.buildtimeout_set_event,
"GUARD" : self.guard_event,
"TORCTL_TIMER" : self.timer_event
}
self.parent_handler = None
self._sabotage()
def _sabotage(self):
raise TorCtlError("Error: Do not extend from EventListener directly! Use Pre, Post or DualEventListener instead.")
def listen(self, event):
self.heartbeat_event(event)
self._map1.get(event.event_name, self.unknown_event)(event)
def set_parent(self, parent_handler):
self.parent_handler = parent_handler
class PreEventListener(EventListener):
def _sabotage(self): pass
class PostEventListener(EventListener):
def _sabotage(self): pass
class DualEventListener(PreEventListener,PostEventListener):
def _sabotage(self): pass
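# Usage sketch (a minimal listener; BWCounter is an invented name): count
# BW events after the main EventHandler has processed them.
# class BWCounter(PostEventListener):
#   def __init__(self):
#     PostEventListener.__init__(self)
#     self.count = 0
#   def bandwidth_event(self, event):
#     self.count += 1
# conn.add_event_listener(BWCounter())
# conn.set_events(["BW"])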
class EventHandler(EventSink):
"""An 'EventHandler' wraps callbacks for the events Tor can return.
Each event argument is an instance of the corresponding event
class."""
def __init__(self):
"""Create a new EventHandler."""
self._map1 = {
"CIRC" : self.circ_status_event,
"STREAM" : self.stream_status_event,
"ORCONN" : self.or_conn_status_event,
"STREAM_BW" : self.stream_bw_event,
"BW" : self.bandwidth_event,
"DEBUG" : self.msg_event,
"INFO" : self.msg_event,
"NOTICE" : self.msg_event,
"WARN" : self.msg_event,
"ERR" : self.msg_event,
"NEWDESC" : self.new_desc_event,
"ADDRMAP" : self.address_mapped_event,
"NS" : self.ns_event,
"NEWCONSENSUS" : self.new_consensus_event,
"BUILDTIMEOUT_SET" : self.buildtimeout_set_event,
"GUARD" : self.guard_event,
"TORCTL_TIMER" : self.timer_event
}
self.c = None # Gets set by Connection.set_event_handler()
self.pre_listeners = []
self.post_listeners = []
def _handle1(self, timestamp, lines):
"""Dispatcher: called from Connection when an event is received."""
for code, msg, data in lines:
event = self._decode1(msg, data)
event.arrived_at = timestamp
event.state=EVENT_STATE.PRELISTEN
for l in self.pre_listeners:
l.listen(event)
event.state=EVENT_STATE.HEARTBEAT
self.heartbeat_event(event)
event.state=EVENT_STATE.HANDLING
self._map1.get(event.event_name, self.unknown_event)(event)
event.state=EVENT_STATE.POSTLISTEN
for l in self.post_listeners:
l.listen(event)
def _decode1(self, body, data):
"""Unpack an event message into a type/arguments-tuple tuple."""
if " " in body:
evtype,body = body.split(" ",1)
else:
evtype,body = body,""
evtype = evtype.upper()
if evtype == "CIRC":
m = re.match(r"(\d+)\s+(\S+)(\s\S+)?(\s\S+)?(\s\S+)?(\s\S+)?", body)
if not m:
raise ProtocolError("CIRC event misformatted.")
ident,status,path,purpose,reason,remote = m.groups()
ident = int(ident)
if path:
if "PURPOSE=" in path:
remote = reason
reason = purpose
purpose=path
path=[]
elif "REASON=" in path:
remote = reason
reason = path
purpose = ""
path=[]
else:
path_verb = path.strip().split(",")
path = []
for p in path_verb:
path.append(p.replace("~", "=").split("=")[0])
else:
path = []
if purpose and "REASON=" in purpose:
remote=reason
reason=purpose
purpose=""
if purpose: purpose = purpose[9:]
if reason: reason = reason[8:]
if remote: remote = remote[15:]
event = CircuitEvent(evtype, ident, status, path, purpose, reason, remote)
elif evtype == "STREAM":
#plog("DEBUG", "STREAM: "+body)
m = re.match(r"(\S+)\s+(\S+)\s+(\S+)\s+(\S+)?:(\d+)(\sREASON=\S+)?(\sREMOTE_REASON=\S+)?(\sSOURCE=\S+)?(\sSOURCE_ADDR=\S+)?(\s+PURPOSE=\S+)?", body)
if not m:
raise ProtocolError("STREAM event misformatted.")
ident,status,circ,target_host,target_port,reason,remote,source,source_addr,purpose = m.groups()
ident,circ = map(int, (ident,circ))
if not target_host: # This can happen on SOCKS_PROTOCOL failures
target_host = "(none)"
if reason: reason = reason[8:]
if remote: remote = remote[15:]
if source: source = source[8:]
if source_addr: source_addr = source_addr[13:]
if purpose:
purpose = purpose.lstrip()
purpose = purpose[8:]
event = StreamEvent(evtype, ident, status, circ, target_host,
int(target_port), reason, remote, source, source_addr, purpose)
elif evtype == "ORCONN":
m = re.match(r"(\S+)\s+(\S+)(\sAGE=\S+)?(\sREAD=\S+)?(\sWRITTEN=\S+)?(\sREASON=\S+)?(\sNCIRCS=\S+)?", body)
if not m:
raise ProtocolError("ORCONN event misformatted.")
target, status, age, read, wrote, reason, ncircs = m.groups()
#plog("DEBUG", "ORCONN: "+body)
if ncircs: ncircs = int(ncircs[8:])
else: ncircs = 0
if reason: reason = reason[8:]
if age: age = int(age[5:])
else: age = 0
if read: read = int(read[6:])
else: read = 0
if wrote: wrote = int(wrote[9:])
else: wrote = 0
event = ORConnEvent(evtype, status, target, age, read, wrote,
reason, ncircs)
elif evtype == "STREAM_BW":
m = re.match(r"(\d+)\s+(\d+)\s+(\d+)", body)
if not m:
raise ProtocolError("STREAM_BW event misformatted.")
event = StreamBwEvent(evtype, *m.groups())
elif evtype == "BW":
m = re.match(r"(\d+)\s+(\d+)", body)
if not m:
raise ProtocolError("BANDWIDTH event misformatted.")
read, written = map(long, m.groups())
event = BWEvent(evtype, read, written)
elif evtype in ("DEBUG", "INFO", "NOTICE", "WARN", "ERR"):
event = LogEvent(evtype, body)
elif evtype == "NEWDESC":
ids_verb = body.split(" ")
ids = []
for i in ids_verb:
ids.append(i.replace("~", "=").split("=")[0].replace("$",""))
event = NewDescEvent(evtype, ids)
elif evtype == "ADDRMAP":
# TODO: Also parse errors and GMTExpiry
m = re.match(r'(\S+)\s+(\S+)\s+(\"[^"]+\"|\w+)', body)
if not m:
raise ProtocolError("ADDRMAP event misformatted.")
fromaddr, toaddr, when = m.groups()
if when.upper() == "NEVER":
when = None
else:
when = time.strptime(when[1:-1], "%Y-%m-%d %H:%M:%S")
event = AddrMapEvent(evtype, fromaddr, toaddr, when)
elif evtype == "NS":
event = NetworkStatusEvent(evtype, parse_ns_body(data))
elif evtype == "NEWCONSENSUS":
event = NewConsensusEvent(evtype, parse_ns_body(data))
elif evtype == "BUILDTIMEOUT_SET":
m = re.match(
r"(\S+)\sTOTAL_TIMES=(\d+)\sTIMEOUT_MS=(\d+)\sXM=(\d+)\sALPHA=(\S+)\sCUTOFF_QUANTILE=(\S+)",
body)
set_type, total_times, timeout_ms, xm, alpha, quantile = m.groups()
event = BuildTimeoutSetEvent(evtype, set_type, int(total_times),
int(timeout_ms), int(xm), float(alpha),
float(quantile))
elif evtype == "GUARD":
m = re.match(r"(\S+)\s(\S+)\s(\S+)", body)
entry, guard, status = m.groups()
event = GuardEvent(evtype, entry, guard, status)
elif evtype == "TORCTL_TIMER":
event = TimerEvent(evtype, data)
else:
event = UnknownEvent(evtype, body)
return event
def add_event_listener(self, evlistener):
if isinstance(evlistener, PreEventListener):
self.pre_listeners.append(evlistener)
if isinstance(evlistener, PostEventListener):
self.post_listeners.append(evlistener)
evlistener.set_parent(self)
def heartbeat_event(self, event):
"""Called before any event is received. Convenience function
for any cleanup/setup/reconfiguration you may need to do.
"""
pass
def unknown_event(self, event):
"""Called when we get an event type we don't recognize. This
    is almost always an error.
"""
pass
def circ_status_event(self, event):
"""Called when a circuit status changes if listening to CIRCSTATUS
events."""
pass
def stream_status_event(self, event):
"""Called when a stream status changes if listening to STREAMSTATUS
events. """
pass
def stream_bw_event(self, event):
pass
def or_conn_status_event(self, event):
"""Called when an OR connection's status changes if listening to
ORCONNSTATUS events."""
pass
def bandwidth_event(self, event):
"""Called once a second if listening to BANDWIDTH events.
"""
pass
def new_desc_event(self, event):
"""Called when Tor learns a new server descriptor if listenting to
NEWDESC events.
"""
pass
def msg_event(self, event):
"""Called when a log message of a given severity arrives if listening
to INFO_MSG, NOTICE_MSG, WARN_MSG, or ERR_MSG events."""
pass
def ns_event(self, event):
pass
def new_consensus_event(self, event):
pass
def buildtimeout_set_event(self, event):
pass
def guard_event(self, event):
pass
def address_mapped_event(self, event):
"""Called when Tor adds a mapping for an address if listening
to ADDRESSMAPPED events.
"""
pass
def timer_event(self, event):
pass
class Consensus:
"""
A Consensus is a pickleable container for the members of
ConsensusTracker. This should only be used as a temporary
  reference, and will change after a NEWDESC or NEWCONSENSUS event.
If you want a copy of a consensus that is independent
of subsequent updates, use copy.deepcopy()
"""
def __init__(self, ns_map, sorted_r, router_map, nick_map, consensus_count):
self.ns_map = ns_map
self.sorted_r = sorted_r
self.routers = router_map
self.name_to_key = nick_map
self.consensus_count = consensus_count
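# Illustrative only: because current_consensus() hands back live
# references, deepcopy the result if it must survive later NEWDESC or
# NEWCONSENSUS updates (`tracker` is an assumed ConsensusTracker).
#
# import copy
# snapshot = copy.deepcopy(tracker.current_consensus())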
class ConsensusTracker(EventHandler):
"""
A ConsensusTracker is an EventHandler that tracks the current
consensus of Tor in self.ns_map, self.routers and self.sorted_r
Users must subscribe to "NEWCONSENSUS" and "NEWDESC" events.
If you also wish to track the Tor client's opinion on the Running flag
based on reachability tests, you must subscribe to "NS" events,
and you should set the constructor parameter "consensus_only" to
False.
"""
def __init__(self, c, RouterClass=Router, consensus_only=True):
EventHandler.__init__(self)
c.set_event_handler(self)
self.ns_map = {}
self.routers = {}
self.sorted_r = []
self.name_to_key = {}
self.RouterClass = RouterClass
self.consensus_count = 0
self.consensus_only = consensus_only
self.update_consensus()
# XXX: If there were a potential memory leak through perpetually referenced
# objects, this function would be the #1 suspect.
def _read_routers(self, nslist):
# Routers can fall out of our consensus five different ways:
# 1. Their descriptors disappear
# 2. Their NS documents disappear
# 3. They lose the Running flag
# 4. They list a bandwidth of 0
# 5. They have 'opt hibernating' set
routers = self.c.read_routers(nslist) # Sets .down if 3,4,5
self.consensus_count = len(routers)
old_idhexes = set(self.routers.keys())
new_idhexes = set(map(lambda r: r.idhex, routers))
for r in routers:
if r.idhex in self.routers:
if self.routers[r.idhex].nickname != r.nickname:
plog("NOTICE", "Router "+r.idhex+" changed names from "
+self.routers[r.idhex].nickname+" to "+r.nickname)
# Must do IN-PLACE update to keep all the refs to this router
# valid and current (especially for stats)
self.routers[r.idhex].update_to(r)
else:
rc = self.RouterClass(r)
self.routers[rc.idhex] = rc
removed_idhexes = old_idhexes - new_idhexes
removed_idhexes.update(set(map(lambda r: r.idhex,
filter(lambda r: r.down, routers))))
for i in removed_idhexes:
if i not in self.routers: continue
self.routers[i].down = True
if "Running" in self.routers[i].flags:
self.routers[i].flags.remove("Running")
if self.routers[i].refcount == 0:
self.routers[i].deleted = True
if self.routers[i].__class__.__name__ == "StatsRouter":
plog("WARN", "Expiring non-running StatsRouter "+i)
else:
plog("INFO", "Expiring non-running router "+i)
del self.routers[i]
else:
plog("INFO", "Postponing expiring non-running router "+i)
self.routers[i].deleted = True
self.sorted_r = filter(lambda r: not r.down, self.routers.itervalues())
self.sorted_r.sort(lambda x, y: cmp(y.bw, x.bw))
for i in xrange(len(self.sorted_r)): self.sorted_r[i].list_rank = i
# XXX: Verification only. Can be removed.
self._sanity_check(self.sorted_r)
def _sanity_check(self, list):
if len(self.routers) > 1.5*self.consensus_count:
plog("WARN", "Router count of "+str(len(self.routers))+" exceeds consensus count "+str(self.consensus_count)+" by more than 50%")
if len(self.ns_map) < self.consensus_count:
plog("WARN", "NS map count of "+str(len(self.ns_map))+" is below consensus count "+str(self.consensus_count))
downed = filter(lambda r: r.down, list)
for d in downed:
plog("WARN", "Router "+d.idhex+" still present but is down. Del: "+str(d.deleted)+", flags: "+str(d.flags)+", bw: "+str(d.bw))
deleted = filter(lambda r: r.deleted, list)
for d in deleted:
plog("WARN", "Router "+d.idhex+" still present but is deleted. Down: "+str(d.down)+", flags: "+str(d.flags)+", bw: "+str(d.bw))
zero = filter(lambda r: r.refcount == 0 and r.__class__.__name__ == "StatsRouter", list)
for d in zero:
plog("WARN", "Router "+d.idhex+" has refcount 0. Del:"+str(d.deleted)+", Down: "+str(d.down)+", flags: "+str(d.flags)+", bw: "+str(d.bw))
def _update_consensus(self, nslist):
self.ns_map = {}
for n in nslist:
self.ns_map[n.idhex] = n
self.name_to_key[n.nickname] = "$"+n.idhex
def update_consensus(self):
if self.consensus_only:
self._update_consensus(self.c.get_consensus())
else:
self._update_consensus(self.c.get_network_status())
self._read_routers(self.ns_map.values())
def new_consensus_event(self, n):
self._update_consensus(n.nslist)
self._read_routers(self.ns_map.values())
plog("DEBUG", str(time.time()-n.arrived_at)+" Read " + str(len(n.nslist))
+" NC => " + str(len(self.sorted_r)) + " routers")
def new_desc_event(self, d):
update = False
for i in d.idlist:
r = None
try:
if i in self.ns_map:
ns = (self.ns_map[i],)
else:
plog("WARN", "Need to getinfo ns/id for router desc: "+i)
ns = self.c.get_network_status("id/"+i)
r = self.c.read_routers(ns)
except ErrorReply, e:
plog("WARN", "Error reply for "+i+" after NEWDESC: "+str(e))
continue
if not r:
plog("WARN", "No router desc for "+i+" after NEWDESC")
continue
elif len(r) != 1:
plog("WARN", "Multiple descs for "+i+" after NEWDESC")
r = r[0]
ns = ns[0]
if ns.idhex in self.routers and self.routers[ns.idhex].orhash == r.orhash:
plog("NOTICE",
"Got extra NEWDESC event for router "+ns.nickname+"="+ns.idhex)
else:
self.consensus_count += 1
self.name_to_key[ns.nickname] = "$"+ns.idhex
if r and r.idhex in self.ns_map:
if ns.orhash != self.ns_map[r.idhex].orhash:
plog("WARN", "Getinfo and consensus disagree for "+r.idhex)
continue
update = True
if r.idhex in self.routers:
self.routers[r.idhex].update_to(r)
else:
self.routers[r.idhex] = self.RouterClass(r)
if update:
self.sorted_r = filter(lambda r: not r.down, self.routers.itervalues())
self.sorted_r.sort(lambda x, y: cmp(y.bw, x.bw))
for i in xrange(len(self.sorted_r)): self.sorted_r[i].list_rank = i
plog("DEBUG", str(time.time()-d.arrived_at)+ " Read " + str(len(d.idlist))
+" ND => "+str(len(self.sorted_r))+" routers. Update: "+str(update))
# XXX: Verification only. Can be removed.
self._sanity_check(self.sorted_r)
return update
def ns_event(self, ev):
update = False
for ns in ev.nslist:
      # Check the current consensus. If present, check flags.
if ns.idhex in self.ns_map and ns.idhex in self.routers and \
ns.orhash == self.ns_map[ns.idhex].orhash:
if "Running" in ns.flags and \
"Running" not in self.ns_map[ns.idhex].flags:
plog("INFO", "Router "+ns.nickname+"="+ns.idhex+" is now up.")
update = True
self.routers[ns.idhex].flags = ns.flags
self.routers[ns.idhex].down = False
if "Running" not in ns.flags and \
"Running" in self.ns_map[ns.idhex].flags:
plog("INFO", "Router "+ns.nickname+"="+ns.idhex+" is now down.")
update = True
self.routers[ns.idhex].flags = ns.flags
self.routers[ns.idhex].down = True
if update:
self.sorted_r = filter(lambda r: not r.down, self.routers.itervalues())
self.sorted_r.sort(lambda x, y: cmp(y.bw, x.bw))
for i in xrange(len(self.sorted_r)): self.sorted_r[i].list_rank = i
self._sanity_check(self.sorted_r)
def current_consensus(self):
return Consensus(self.ns_map, self.sorted_r, self.routers,
self.name_to_key, self.consensus_count)
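# A hedged usage sketch: `conn` is assumed to be an authenticated
# Connection. The constructor installs the tracker as the connection's
# event handler itself; the caller still has to subscribe to the
# events named in the class docstring.
#
# tracker = ConsensusTracker(conn)
# conn.set_events([EVENT_TYPE.NEWCONSENSUS, EVENT_TYPE.NEWDESC], True)
# print len(tracker.sorted_r), "running routers, sorted by bandwidth"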
class DebugEventHandler(EventHandler):
"""Trivial debug event handler: reassembles all parsed events to stdout."""
def circ_status_event(self, circ_event): # CircuitEvent()
output = [circ_event.event_name, str(circ_event.circ_id),
circ_event.status]
if circ_event.path:
output.append(",".join(circ_event.path))
if circ_event.reason:
output.append("REASON=" + circ_event.reason)
if circ_event.remote_reason:
output.append("REMOTE_REASON=" + circ_event.remote_reason)
print " ".join(output)
def stream_status_event(self, strm_event):
output = [strm_event.event_name, str(strm_event.strm_id),
strm_event.status, str(strm_event.circ_id),
strm_event.target_host, str(strm_event.target_port)]
if strm_event.reason:
output.append("REASON=" + strm_event.reason)
if strm_event.remote_reason:
output.append("REMOTE_REASON=" + strm_event.remote_reason)
print " ".join(output)
def ns_event(self, ns_event):
for ns in ns_event.nslist:
print " ".join((ns_event.event_name, ns.nickname, ns.idhash,
ns.updated.isoformat(), ns.ip, str(ns.orport),
str(ns.dirport), " ".join(ns.flags)))
def new_consensus_event(self, nc_event):
self.ns_event(nc_event)
def new_desc_event(self, newdesc_event):
print " ".join((newdesc_event.event_name, " ".join(newdesc_event.idlist)))
def or_conn_status_event(self, orconn_event):
if orconn_event.age: age = "AGE="+str(orconn_event.age)
else: age = ""
if orconn_event.read_bytes: read = "READ="+str(orconn_event.read_bytes)
else: read = ""
if orconn_event.wrote_bytes: wrote = "WRITTEN="+str(orconn_event.wrote_bytes)
else: wrote = ""
if orconn_event.reason: reason = "REASON="+orconn_event.reason
else: reason = ""
if orconn_event.ncircs: ncircs = "NCIRCS="+str(orconn_event.ncircs)
else: ncircs = ""
print " ".join((orconn_event.event_name, orconn_event.endpoint,
orconn_event.status, age, read, wrote, reason, ncircs))
def msg_event(self, log_event):
print log_event.event_name+" "+log_event.msg
def bandwidth_event(self, bw_event):
print bw_event.event_name+" "+str(bw_event.read)+" "+str(bw_event.written)
def parseHostAndPort(h):
"""Given a string of the form 'address:port' or 'address' or
'port' or '', return a two-tuple of (address, port)
"""
host, port = "localhost", 9100
if ":" in h:
i = h.index(":")
host = h[:i]
try:
port = int(h[i+1:])
except ValueError:
print "Bad hostname %r"%h
sys.exit(1)
elif h:
try:
port = int(h)
except ValueError:
host = h
return host, port
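# Illustrative results (defaults are "localhost" and port 9100):
#   parseHostAndPort("example.com:9051") -> ("example.com", 9051)
#   parseHostAndPort("9051")             -> ("localhost", 9051)
#   parseHostAndPort("example.com")      -> ("example.com", 9100)
#   parseHostAndPort("")                 -> ("localhost", 9100)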
def run_example(host,port):
""" Example of basic TorCtl usage. See PathSupport for more advanced
usage.
"""
print "host is %s:%d"%(host,port)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host,port))
c = Connection(s)
c.set_event_handler(DebugEventHandler())
th = c.launch_thread()
c.authenticate()
print "nick",`c.get_option("nickname")`
print `c.get_info("version")`
#print `c.get_info("desc/name/moria1")`
print `c.get_info("network-status")`
print `c.get_info("addr-mappings/all")`
print `c.get_info("addr-mappings/config")`
print `c.get_info("addr-mappings/cache")`
print `c.get_info("addr-mappings/control")`
print `c.extend_circuit(0,["moria1"])`
try:
print `c.extend_circuit(0,[""])`
except ErrorReply: # wtf?
print "got error. good."
except:
print "Strange error", sys.exc_info()[0]
#send_signal(s,1)
#save_conf(s)
#set_option(s,"1")
#set_option(s,"bandwidthburstbytes 100000")
#set_option(s,"runasdaemon 1")
#set_events(s,[EVENT_TYPE.WARN])
# c.set_events([EVENT_TYPE.ORCONN], True)
c.set_events([EVENT_TYPE.STREAM, EVENT_TYPE.CIRC,
EVENT_TYPE.NEWCONSENSUS, EVENT_TYPE.NEWDESC,
EVENT_TYPE.ORCONN, EVENT_TYPE.BW], True)
th.join()
return
if __name__ == '__main__':
if len(sys.argv) > 2:
print "Syntax: TorControl.py torhost:torport"
sys.exit(0)
else:
sys.argv.append("localhost:9051")
sh,sp = parseHostAndPort(sys.argv[1])
run_example(sh,sp)
| bsd-3-clause |
zfrenchee/pandas | pandas/tests/frame/test_nonunique_indexes.py | 2 | 18505 | # -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import numpy as np
from pandas.compat import lrange, u
from pandas import DataFrame, Series, MultiIndex, date_range
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameNonuniqueIndexes(TestData):
def test_column_dups_operations(self):
def check(result, expected=None):
if expected is not None:
assert_frame_equal(result, expected)
result.dtypes
str(result)
# assignment
# GH 3687
arr = np.random.randn(3, 2)
idx = lrange(2)
df = DataFrame(arr, columns=['A', 'A'])
df.columns = idx
expected = DataFrame(arr, columns=idx)
check(df, expected)
idx = date_range('20130101', periods=4, freq='Q-NOV')
df = DataFrame([[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]],
columns=['a', 'a', 'a', 'a'])
df.columns = idx
expected = DataFrame(
[[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]], columns=idx)
check(df, expected)
# insert
df = DataFrame([[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]],
columns=['foo', 'bar', 'foo', 'hello'])
df['string'] = 'bah'
expected = DataFrame([[1, 1, 1, 5, 'bah'], [1, 1, 2, 5, 'bah'],
[2, 1, 3, 5, 'bah']],
columns=['foo', 'bar', 'foo', 'hello', 'string'])
check(df, expected)
with tm.assert_raises_regex(ValueError, 'Length of value'):
df.insert(0, 'AnotherColumn', range(len(df.index) - 1))
# insert same dtype
df['foo2'] = 3
expected = DataFrame([[1, 1, 1, 5, 'bah', 3], [1, 1, 2, 5, 'bah', 3],
[2, 1, 3, 5, 'bah', 3]],
columns=['foo', 'bar', 'foo', 'hello',
'string', 'foo2'])
check(df, expected)
# set (non-dup)
df['foo2'] = 4
expected = DataFrame([[1, 1, 1, 5, 'bah', 4], [1, 1, 2, 5, 'bah', 4],
[2, 1, 3, 5, 'bah', 4]],
columns=['foo', 'bar', 'foo', 'hello',
'string', 'foo2'])
check(df, expected)
df['foo2'] = 3
# delete (non dup)
del df['bar']
expected = DataFrame([[1, 1, 5, 'bah', 3], [1, 2, 5, 'bah', 3],
[2, 3, 5, 'bah', 3]],
columns=['foo', 'foo', 'hello', 'string', 'foo2'])
check(df, expected)
# try to delete again (its not consolidated)
del df['hello']
expected = DataFrame([[1, 1, 'bah', 3], [1, 2, 'bah', 3],
[2, 3, 'bah', 3]],
columns=['foo', 'foo', 'string', 'foo2'])
check(df, expected)
# consolidate
df = df._consolidate()
expected = DataFrame([[1, 1, 'bah', 3], [1, 2, 'bah', 3],
[2, 3, 'bah', 3]],
columns=['foo', 'foo', 'string', 'foo2'])
check(df, expected)
# insert
df.insert(2, 'new_col', 5.)
expected = DataFrame([[1, 1, 5., 'bah', 3], [1, 2, 5., 'bah', 3],
[2, 3, 5., 'bah', 3]],
columns=['foo', 'foo', 'new_col', 'string',
'foo2'])
check(df, expected)
# insert a dup
tm.assert_raises_regex(ValueError, 'cannot insert',
df.insert, 2, 'new_col', 4.)
df.insert(2, 'new_col', 4., allow_duplicates=True)
expected = DataFrame([[1, 1, 4., 5., 'bah', 3],
[1, 2, 4., 5., 'bah', 3],
[2, 3, 4., 5., 'bah', 3]],
columns=['foo', 'foo', 'new_col',
'new_col', 'string', 'foo2'])
check(df, expected)
# delete (dup)
del df['foo']
expected = DataFrame([[4., 5., 'bah', 3], [4., 5., 'bah', 3],
[4., 5., 'bah', 3]],
columns=['new_col', 'new_col', 'string', 'foo2'])
assert_frame_equal(df, expected)
# dup across dtypes
df = DataFrame([[1, 1, 1., 5], [1, 1, 2., 5], [2, 1, 3., 5]],
columns=['foo', 'bar', 'foo', 'hello'])
check(df)
df['foo2'] = 7.
expected = DataFrame([[1, 1, 1., 5, 7.], [1, 1, 2., 5, 7.],
[2, 1, 3., 5, 7.]],
columns=['foo', 'bar', 'foo', 'hello', 'foo2'])
check(df, expected)
result = df['foo']
expected = DataFrame([[1, 1.], [1, 2.], [2, 3.]],
columns=['foo', 'foo'])
check(result, expected)
# multiple replacements
df['foo'] = 'string'
expected = DataFrame([['string', 1, 'string', 5, 7.],
['string', 1, 'string', 5, 7.],
['string', 1, 'string', 5, 7.]],
columns=['foo', 'bar', 'foo', 'hello', 'foo2'])
check(df, expected)
del df['foo']
expected = DataFrame([[1, 5, 7.], [1, 5, 7.], [1, 5, 7.]], columns=[
'bar', 'hello', 'foo2'])
check(df, expected)
# values
df = DataFrame([[1, 2.5], [3, 4.5]], index=[1, 2], columns=['x', 'x'])
result = df.values
expected = np.array([[1, 2.5], [3, 4.5]])
assert (result == expected).all().all()
# rename, GH 4403
df4 = DataFrame(
{'TClose': [22.02],
'RT': [0.0454],
'TExg': [0.0422]},
index=MultiIndex.from_tuples([(600809, 20130331)],
names=['STK_ID', 'RPT_Date']))
df5 = DataFrame({'STK_ID': [600809] * 3,
'RPT_Date': [20120930, 20121231, 20130331],
'STK_Name': [u('饡驦'), u('饡驦'), u('饡驦')],
'TClose': [38.05, 41.66, 30.01]},
index=MultiIndex.from_tuples(
[(600809, 20120930),
(600809, 20121231),
(600809, 20130331)],
names=['STK_ID', 'RPT_Date']))
k = pd.merge(df4, df5, how='inner', left_index=True, right_index=True)
result = k.rename(
columns={'TClose_x': 'TClose', 'TClose_y': 'QT_Close'})
str(result)
result.dtypes
expected = (DataFrame([[0.0454, 22.02, 0.0422, 20130331, 600809,
u('饡驦'), 30.01]],
columns=['RT', 'TClose', 'TExg',
'RPT_Date', 'STK_ID', 'STK_Name',
'QT_Close'])
.set_index(['STK_ID', 'RPT_Date'], drop=False))
assert_frame_equal(result, expected)
# reindex is invalid!
df = DataFrame([[1, 5, 7.], [1, 5, 7.], [1, 5, 7.]],
columns=['bar', 'a', 'a'])
pytest.raises(ValueError, df.reindex, columns=['bar'])
pytest.raises(ValueError, df.reindex, columns=['bar', 'foo'])
# drop
df = DataFrame([[1, 5, 7.], [1, 5, 7.], [1, 5, 7.]],
columns=['bar', 'a', 'a'])
result = df.drop(['a'], axis=1)
expected = DataFrame([[1], [1], [1]], columns=['bar'])
check(result, expected)
result = df.drop('a', axis=1)
check(result, expected)
# describe
df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]],
columns=['bar', 'a', 'a'], dtype='float64')
result = df.describe()
s = df.iloc[:, 0].describe()
expected = pd.concat([s, s, s], keys=df.columns, axis=1)
check(result, expected)
# check column dups with index equal and not equal to df's index
df = DataFrame(np.random.randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],
columns=['A', 'B', 'A'])
for index in [df.index, pd.Index(list('edcba'))]:
this_df = df.copy()
expected_ser = pd.Series(index.values, index=this_df.index)
expected_df = DataFrame.from_items([('A', expected_ser),
('B', this_df['B']),
('A', expected_ser)])
this_df['A'] = index
check(this_df, expected_df)
# operations
for op in ['__add__', '__mul__', '__sub__', '__truediv__']:
df = DataFrame(dict(A=np.arange(10), B=np.random.rand(10)))
expected = getattr(df, op)(df)
expected.columns = ['A', 'A']
df.columns = ['A', 'A']
result = getattr(df, op)(df)
check(result, expected)
# multiple assignments that change dtypes
# the location indexer is a slice
# GH 6120
df = DataFrame(np.random.randn(5, 2), columns=['that', 'that'])
expected = DataFrame(1.0, index=range(5), columns=['that', 'that'])
df['that'] = 1.0
check(df, expected)
df = DataFrame(np.random.rand(5, 2), columns=['that', 'that'])
expected = DataFrame(1, index=range(5), columns=['that', 'that'])
df['that'] = 1
check(df, expected)
def test_column_dups2(self):
# drop buggy GH 6240
df = DataFrame({'A': np.random.randn(5),
'B': np.random.randn(5),
'C': np.random.randn(5),
'D': ['a', 'b', 'c', 'd', 'e']})
expected = df.take([0, 1, 1], axis=1)
df2 = df.take([2, 0, 1, 2, 1], axis=1)
result = df2.drop('C', axis=1)
assert_frame_equal(result, expected)
# dropna
df = DataFrame({'A': np.random.randn(5),
'B': np.random.randn(5),
'C': np.random.randn(5),
'D': ['a', 'b', 'c', 'd', 'e']})
df.iloc[2, [0, 1, 2]] = np.nan
df.iloc[0, 0] = np.nan
df.iloc[1, 1] = np.nan
df.iloc[:, 3] = np.nan
expected = df.dropna(subset=['A', 'B', 'C'], how='all')
expected.columns = ['A', 'A', 'B', 'C']
df.columns = ['A', 'A', 'B', 'C']
result = df.dropna(subset=['A', 'C'], how='all')
assert_frame_equal(result, expected)
def test_column_dups_indexing(self):
def check(result, expected=None):
if expected is not None:
assert_frame_equal(result, expected)
result.dtypes
str(result)
# boolean indexing
# GH 4879
dups = ['A', 'A', 'C', 'D']
df = DataFrame(np.arange(12).reshape(3, 4), columns=[
'A', 'B', 'C', 'D'], dtype='float64')
expected = df[df.C > 6]
expected.columns = dups
df = DataFrame(np.arange(12).reshape(3, 4),
columns=dups, dtype='float64')
result = df[df.C > 6]
check(result, expected)
# where
df = DataFrame(np.arange(12).reshape(3, 4), columns=[
'A', 'B', 'C', 'D'], dtype='float64')
expected = df[df > 6]
expected.columns = dups
df = DataFrame(np.arange(12).reshape(3, 4),
columns=dups, dtype='float64')
result = df[df > 6]
check(result, expected)
# boolean with the duplicate raises
df = DataFrame(np.arange(12).reshape(3, 4),
columns=dups, dtype='float64')
pytest.raises(ValueError, lambda: df[df.A > 6])
        # dup aligning operations should work
# GH 5185
df1 = DataFrame([1, 2, 3, 4, 5], index=[1, 2, 1, 2, 3])
df2 = DataFrame([1, 2, 3], index=[1, 2, 3])
expected = DataFrame([0, 2, 0, 2, 2], index=[1, 1, 2, 2, 3])
result = df1.sub(df2)
assert_frame_equal(result, expected)
# equality
df1 = DataFrame([[1, 2], [2, np.nan], [3, 4], [4, 4]],
columns=['A', 'B'])
df2 = DataFrame([[0, 1], [2, 4], [2, np.nan], [4, 5]],
columns=['A', 'A'])
# not-comparing like-labelled
pytest.raises(ValueError, lambda: df1 == df2)
df1r = df1.reindex_like(df2)
result = df1r == df2
expected = DataFrame([[False, True], [True, False], [False, False], [
True, False]], columns=['A', 'A'])
assert_frame_equal(result, expected)
# mixed column selection
# GH 5639
dfbool = DataFrame({'one': Series([True, True, False],
index=['a', 'b', 'c']),
'two': Series([False, False, True, False],
index=['a', 'b', 'c', 'd']),
'three': Series([False, True, True, True],
index=['a', 'b', 'c', 'd'])})
expected = pd.concat(
[dfbool['one'], dfbool['three'], dfbool['one']], axis=1)
result = dfbool[['one', 'three', 'one']]
check(result, expected)
# multi-axis dups
# GH 6121
df = DataFrame(np.arange(25.).reshape(5, 5),
index=['a', 'b', 'c', 'd', 'e'],
columns=['A', 'B', 'C', 'D', 'E'])
z = df[['A', 'C', 'A']].copy()
expected = z.loc[['a', 'c', 'a']]
df = DataFrame(np.arange(25.).reshape(5, 5),
index=['a', 'b', 'c', 'd', 'e'],
columns=['A', 'B', 'C', 'D', 'E'])
z = df[['A', 'C', 'A']]
result = z.loc[['a', 'c', 'a']]
check(result, expected)
def test_column_dups_indexing2(self):
# GH 8363
# datetime ops with a non-unique index
df = DataFrame({'A': np.arange(5, dtype='int64'),
'B': np.arange(1, 6, dtype='int64')},
index=[2, 2, 3, 3, 4])
result = df.B - df.A
expected = Series(1, index=[2, 2, 3, 3, 4])
assert_series_equal(result, expected)
df = DataFrame({'A': date_range('20130101', periods=5),
'B': date_range('20130101 09:00:00', periods=5)},
index=[2, 2, 3, 3, 4])
result = df.B - df.A
expected = Series(pd.Timedelta('9 hours'), index=[2, 2, 3, 3, 4])
assert_series_equal(result, expected)
def test_columns_with_dups(self):
# GH 3468 related
# basic
df = DataFrame([[1, 2]], columns=['a', 'a'])
df.columns = ['a', 'a.1']
str(df)
expected = DataFrame([[1, 2]], columns=['a', 'a.1'])
assert_frame_equal(df, expected)
df = DataFrame([[1, 2, 3]], columns=['b', 'a', 'a'])
df.columns = ['b', 'a', 'a.1']
str(df)
expected = DataFrame([[1, 2, 3]], columns=['b', 'a', 'a.1'])
assert_frame_equal(df, expected)
# with a dup index
df = DataFrame([[1, 2]], columns=['a', 'a'])
df.columns = ['b', 'b']
str(df)
expected = DataFrame([[1, 2]], columns=['b', 'b'])
assert_frame_equal(df, expected)
# multi-dtype
df = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']],
columns=['a', 'a', 'b', 'b', 'd', 'c', 'c'])
df.columns = list('ABCDEFG')
str(df)
expected = DataFrame(
[[1, 2, 1., 2., 3., 'foo', 'bar']], columns=list('ABCDEFG'))
assert_frame_equal(df, expected)
# this is an error because we cannot disambiguate the dup columns
        pytest.raises(Exception, lambda: DataFrame(
[[1, 2, 'foo', 'bar']], columns=['a', 'a', 'a', 'a']))
# dups across blocks
df_float = DataFrame(np.random.randn(10, 3), dtype='float64')
df_int = DataFrame(np.random.randn(10, 3), dtype='int64')
df_bool = DataFrame(True, index=df_float.index,
columns=df_float.columns)
df_object = DataFrame('foo', index=df_float.index,
columns=df_float.columns)
df_dt = DataFrame(pd.Timestamp('20010101'),
index=df_float.index,
columns=df_float.columns)
df = pd.concat([df_float, df_int, df_bool, df_object, df_dt], axis=1)
assert len(df._data._blknos) == len(df.columns)
assert len(df._data._blklocs) == len(df.columns)
# testing iloc
for i in range(len(df.columns)):
df.iloc[:, i]
# dup columns across dtype GH 2079/2194
vals = [[1, -1, 2.], [2, -2, 3.]]
rs = DataFrame(vals, columns=['A', 'A', 'B'])
xp = DataFrame(vals)
xp.columns = ['A', 'A', 'B']
assert_frame_equal(rs, xp)
def test_values_duplicates(self):
df = DataFrame([[1, 2, 'a', 'b'],
[1, 2, 'a', 'b']],
columns=['one', 'one', 'two', 'two'])
result = df.values
expected = np.array([[1, 2, 'a', 'b'], [1, 2, 'a', 'b']],
dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_set_value_by_index(self):
# See gh-12344
df = DataFrame(np.arange(9).reshape(3, 3).T)
df.columns = list('AAA')
expected = df.iloc[:, 2]
df.iloc[:, 0] = 3
assert_series_equal(df.iloc[:, 2], expected)
df = DataFrame(np.arange(9).reshape(3, 3).T)
df.columns = [2, float(2), str(2)]
expected = df.iloc[:, 1]
df.iloc[:, 0] = 3
assert_series_equal(df.iloc[:, 1], expected)
def test_insert_with_columns_dups(self):
# GH 14291
df = pd.DataFrame()
df.insert(0, 'A', ['g', 'h', 'i'], allow_duplicates=True)
df.insert(0, 'A', ['d', 'e', 'f'], allow_duplicates=True)
df.insert(0, 'A', ['a', 'b', 'c'], allow_duplicates=True)
exp = pd.DataFrame([['a', 'd', 'g'], ['b', 'e', 'h'],
['c', 'f', 'i']], columns=['A', 'A', 'A'])
assert_frame_equal(df, exp)
| bsd-3-clause |
adviti/melange | thirdparty/google_appengine/lib/django_1_2/django/contrib/sessions/backends/db.py | 81 | 2716 | import datetime
from django.conf import settings
from django.contrib.sessions.models import Session
from django.contrib.sessions.backends.base import SessionBase, CreateError
from django.core.exceptions import SuspiciousOperation
from django.db import IntegrityError, transaction, router
from django.utils.encoding import force_unicode
class SessionStore(SessionBase):
"""
Implements database session store.
"""
def __init__(self, session_key=None):
super(SessionStore, self).__init__(session_key)
def load(self):
try:
s = Session.objects.get(
session_key = self.session_key,
expire_date__gt=datetime.datetime.now()
)
return self.decode(force_unicode(s.session_data))
except (Session.DoesNotExist, SuspiciousOperation):
self.create()
return {}
def exists(self, session_key):
try:
Session.objects.get(session_key=session_key)
except Session.DoesNotExist:
return False
return True
def create(self):
while True:
self.session_key = self._get_new_session_key()
try:
# Save immediately to ensure we have a unique entry in the
# database.
self.save(must_create=True)
except CreateError:
# Key wasn't unique. Try again.
continue
self.modified = True
self._session_cache = {}
return
def save(self, must_create=False):
"""
Saves the current session data to the database. If 'must_create' is
True, a database error will be raised if the saving operation doesn't
create a *new* entry (as opposed to possibly updating an existing
entry).
"""
obj = Session(
session_key = self.session_key,
session_data = self.encode(self._get_session(no_load=must_create)),
expire_date = self.get_expiry_date()
)
using = router.db_for_write(Session, instance=obj)
sid = transaction.savepoint(using=using)
try:
obj.save(force_insert=must_create, using=using)
except IntegrityError:
if must_create:
transaction.savepoint_rollback(sid, using=using)
raise CreateError
raise
def delete(self, session_key=None):
if session_key is None:
if self._session_key is None:
return
session_key = self._session_key
try:
Session.objects.get(session_key=session_key).delete()
except Session.DoesNotExist:
pass
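# A minimal usage sketch (not part of Django itself; the key and values
# below are illustrative):
#
# store = SessionStore()
# store.create() # allocates a unique session_key and saves an entry
# store['cart'] = [17, 42]
# store.save()
# assert SessionStore(store.session_key).load()['cart'] == [17, 42]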
| apache-2.0 |
gmr/consulate | consulate/api/catalog.py | 1 | 6410 | """
Consul Catalog Endpoint Access
"""
from consulate.api import base
class Catalog(base.Endpoint):
"""The Consul agent is the core process of Consul. The agent maintains
membership information, registers services, runs checks, responds to
queries and more. The agent must run on every node that is part of a
Consul cluster.
"""
def __init__(self, uri, adapter, dc=None, token=None):
super(Catalog, self).__init__(uri, adapter, dc, token)
def register(self, node, address,
datacenter=None,
service=None,
check=None,
node_meta=None):
"""A a low level mechanism for directly registering or updating
entries in the catalog. It is usually recommended to use the agent
local endpoints, as they are simpler and perform anti-entropy.
The behavior of the endpoint depends on what keys are provided. The
endpoint requires Node and Address to be provided, while Datacenter
will be defaulted to match that of the agent. If only those are
provided, the endpoint will register the node with the catalog.
If the Service key is provided, then the service will also be
registered. If ID is not provided, it will be defaulted to Service.
It is mandated that the ID be node-unique. Both Tags and Port can
be omitted.
If the Check key is provided, then a health check will also be
registered. It is important to remember that this register API is
very low level. This manipulates the health check entry, but does
not setup a script or TTL to actually update the status. For that
behavior, an agent local check should be setup.
The CheckID can be omitted, and will default to the Name. Like
        before, the CheckID must be node-unique. Notes is an opaque
        field meant to hold human-readable text. If a ServiceID is
provided that matches the ID of a service on that node, then the
check is treated as a service level health check, instead of a node
level health check. Lastly, the status must be one of "unknown",
"passing", "warning", or "critical". The "unknown" status is used to
indicate that the initial check has not been performed yet.
It is important to note that Check does not have to be provided
        with Service and vice versa. They can be provided or omitted at will.
Example service dict:
.. code:: python
'Service': {
'ID': 'redis1',
'Service': 'redis',
'Tags': ['master', 'v1'],
'Port': 8000,
}
Example check dict:
.. code:: python
'Check': {
'Node': 'foobar',
'CheckID': 'service:redis1',
'Name': 'Redis health check',
'Notes': 'Script based health check',
'Status': 'passing',
'ServiceID': 'redis1'
}
Example node_meta dict:
.. code:: python
'NodeMeta': {
'somekey': 'somevalue'
}
:param str node: The node name
:param str address: The node address
:param str datacenter: The optional node datacenter
:param dict service: An optional node service
:param dict check: An optional node check
:param dict node_meta: Optional node metadata
:rtype: bool
"""
payload = {'Node': node, 'Address': address}
if datacenter:
payload['Datacenter'] = datacenter
if service:
payload['Service'] = service
if check:
payload['Check'] = check
if node_meta:
payload['NodeMeta'] = node_meta
return self._put_response_body(['register'], None, payload)
def deregister(self, node, datacenter=None,
check_id=None, service_id=None):
"""Directly remove entries in the catalog. It is usually recommended
to use the agent local endpoints, as they are simpler and perform
anti-entropy.
The behavior of the endpoint depends on what keys are provided. The
endpoint requires ``node`` to be provided, while ``datacenter`` will
be defaulted to match that of the agent. If only ``node`` is provided,
then the node, and all associated services and checks are deleted. If
``check_id`` is provided, only that check belonging to the node is
removed. If ``service_id`` is provided, then the service along with
it's associated health check (if any) is removed.
:param str node: The node for the action
:param str datacenter: The optional datacenter for the node
:param str check_id: The optional check_id to remove
:param str service_id: The optional service_id to remove
:rtype: bool
"""
payload = {'Node': node}
if datacenter:
payload['Datacenter'] = datacenter
if check_id:
payload['CheckID'] = check_id
if service_id:
payload['ServiceID'] = service_id
return self._put_response_body(['deregister'], None, payload)
def datacenters(self):
"""Return all the datacenters that are known by the Consul server.
:rtype: list
"""
return self._get_list(['datacenters'])
def node(self, node_id):
"""Return the node data for the specified node
:param str node_id: The node ID
:rtype: dict
"""
return self._get(['node', node_id])
def nodes(self, node_meta=None):
"""Return all of the nodes for the current datacenter.
:param str node_meta: Desired node metadata
:rtype: list
"""
query_params = {'node-meta': node_meta} if node_meta else {}
return self._get_list(['nodes'], query_params)
def service(self, service_id):
"""Return the service details for the given service
:param str service_id: The service id
:rtype: list
"""
return self._get_list(['service', service_id])
def services(self):
"""Return a list of all of the services for the current datacenter.
:rtype: list
"""
return self._get_list(['services'])
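# A brief, hypothetical usage sketch. It assumes a consulate session
# object exposing this endpoint as `session.catalog`; the node, address
# and service values are illustrative.
#
# session.catalog.register('node1', '10.1.10.12',
#                          service={'ID': 'redis1', 'Service': 'redis',
#                                   'Tags': ['master', 'v1'], 'Port': 8000})
# print(session.catalog.nodes())
# session.catalog.deregister('node1', service_id='redis1')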
| bsd-3-clause |
parksangkil/nw.js | tools/package_binaries.py | 64 | 14511 | #!/usr/bin/env python
import argparse
import getnwisrelease
import getnwversion
import gzip
import os
import platform
import shutil
import sys
import tarfile
import zipfile
from subprocess import call
steps = ['nw', 'chromedriver', 'symbol', 'headers', 'others']
################################
# Parse command line args
parser = argparse.ArgumentParser(description='Package nw binaries.')
parser.add_argument('-p','--path', help='Where to find the binaries, like out/Release', required=False)
parser.add_argument('-a','--arch', help='target arch', required=False)
parser.add_argument('-m','--mode', help='package mode', required=False)
parser.add_argument('-i','--icudat', help='icudat override', required=False)
group = parser.add_mutually_exclusive_group()
group.add_argument('-s','--step', choices=steps, help='Execute specified step.', required=False)
group.add_argument('-n','--skip', choices=steps, help='Skip specified step.', required=False)
args = parser.parse_args()
################################
# Init variables.
binaries_location = None # .../out/Release
platform_name = None # win/linux/osx
arch = None # ia32/x64
step = None # nw/chromedriver/symbol
skip = None
nw_ver = None # x.xx
dist_dir = None # .../out/Release/dist
flavor = args.mode
is_headers_ok = False # record whether nw-headers generated
package_name = 'nwjs'
if flavor in ['sdk', 'nacl']:
package_name = 'nwjs-' + args.mode
step = args.step
skip = args.skip
binaries_location = args.path
# If the binaries location is not given, calculate it from script related dir.
if binaries_location == None:
binaries_location = os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir, os.pardir, 'out', 'Release')
if not os.path.isabs(binaries_location):
binaries_location = os.path.join(os.getcwd(), binaries_location)
if not os.path.isdir(binaries_location):
print 'Invalid path: ' + binaries_location
exit(-1)
binaries_location = os.path.normpath(binaries_location)
dist_dir = os.path.join(binaries_location, 'dist')
print 'Working on ' + binaries_location
if args.icudat != None:
#FIXME: for some reason they are the same file (hard link) and copy will fail
os.remove(os.path.join(binaries_location, 'icudtl.dat'))
shutil.copy(args.icudat, binaries_location)
if sys.platform.startswith('linux'):
platform_name = 'linux'
elif sys.platform in ('win32', 'cygwin'):
platform_name = 'win'
elif sys.platform == 'darwin':
platform_name = 'osx'
else:
print 'Unsupported platform: ' + sys.platform
exit(-1)
_arch = platform.architecture()[0]
if _arch == '64bit':
arch = 'x64'
elif _arch == '32bit':
arch = 'ia32'
else:
print 'Unsupported arch: ' + _arch
exit(-1)
if platform_name == 'win':
libfile = os.path.join(binaries_location, 'nw.lib')
expfile = os.path.join(binaries_location, 'nw.exp')
shutil.copy(os.path.join(binaries_location, 'nw.dll.lib'), libfile)
shutil.copy(os.path.join(binaries_location, 'nw.dll.exp'), expfile)
if platform_name == 'win':
arch = 'ia32'
if platform_name != 'osx':
try:
os.remove(os.path.join(binaries_location, 'en-US.pak'))
except OSError:
pass
shutil.copy(os.path.join(binaries_location, 'locales', 'en-US.pak'), binaries_location)
shutil.rmtree(os.path.join(binaries_location, 'locales'))
os.mkdir(os.path.join(binaries_location, 'locales'))
shutil.copy(os.path.join(binaries_location, 'en-US.pak'), os.path.join(binaries_location, 'locales'))
if platform_name == 'osx':
# detect output arch
nw_bin = binaries_location + '/nwjs.app/Contents/MacOS/nwjs'
import subprocess
if 'i386' in subprocess.check_output(['file',nw_bin]):
arch = 'ia32'
else: # should be 'x86_64'
arch = 'x64'
if args.arch != None:
arch = args.arch
nw_ver = getnwversion.nw_version
if getnwisrelease.release == 0:
nw_ver += getnwisrelease.postfix
################################
# Generate targets
#
# target example:
# {
# 'input' : [ 'nw', 'nw.pak', ... ]
# 'output' : 'nwjs-v0.9.2-linux-x64'
# 'compress' : 'tar.gz'
# 'folder' : True # Optional. Two or more input files are put into a separate
# # folder automatically; set this flag to force it for a single file.
# }
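# For instance (hypothetical version number), generate_target_nw('linux',
# 'x64', '0.13.0') would produce roughly:
# {
#   'input' : [ 'credits.html', 'resources.pak', 'nw', ... ]
#   'output' : 'nwjs-v0.13.0-linux-x64'
#   'compress' : 'tar.gz'
# }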
def generate_target_nw(platform_name, arch, version):
target = {}
# Output
target['output'] = ''.join([
package_name, '-',
'v', version,
'-', platform_name,
'-', arch])
# Compress type
if platform_name == 'linux':
target['compress'] = 'tar.gz'
else:
target['compress'] = 'zip'
# Input
if platform_name == 'linux':
target['input'] = [
'credits.html',
'resources.pak',
'nw_100_percent.pak',
'nw',
'icudtl.dat',
'locales',
'snapshot_blob.bin',
'natives_blob.bin',
]
if flavor in ['nacl','sdk'] :
target['input'] += ['nacl_helper', 'nacl_helper_bootstrap', 'pnacl']
if arch == 'x64':
target['input'].append('nacl_irt_x86_64.nexe')
else:
target['input'].append('nacl_irt_x86_32.nexe')
elif platform_name == 'win':
target['input'] = [
'snapshot_blob.bin',
'natives_blob.bin',
'd3dcompiler_47.dll',
'libEGL.dll',
'libGLESv2.dll',
'nw.dll',
'nw_elf.dll',
'nw.exe',
'locales',
'icudtl.dat',
'credits.html',
'resources.pak',
'nw_100_percent.pak',
'nw_200_percent.pak',
]
if flavor in ['nacl','sdk'] :
target['input'].append('pnacl')
if arch == 'x64':
target['input'].append('nacl_irt_x86_64.nexe')
else:
target['input'].append('nacl_irt_x86_32.nexe')
elif platform_name == 'osx':
target['input'] = [
'nwjs.app',
'credits.html',
]
else:
print 'Unsupported platform: ' + platform_name
exit(-1)
return target
def generate_target_chromedriver(platform_name, arch, version):
if args.mode != 'sdk':
return generate_target_empty(platform_name, arch, version)
target = {}
# Output
target['output'] = ''.join([
'chromedriver-nw-',
'v', version,
'-', platform_name,
'-', arch])
# Compress type
if platform_name == 'linux':
target['compress'] = 'tar.gz'
else:
target['compress'] = 'zip'
# Input
if platform_name == 'win':
target['input'] = ['chromedriver.exe']
else:
target['input'] = ['chromedriver']
target['folder'] = True # always create a folder
return target
def generate_target_symbols(platform_name, arch, version):
target = {}
target['output'] = ''.join([package_name, '-symbol-',
'v', version,
'-', platform_name,
'-', arch])
if platform_name == 'linux':
target['compress'] = 'tar.gz'
target['input'] = ['nw.breakpad.' + arch]
target['folder'] = True
elif platform_name == 'win':
target['compress'] = None
target['input'] = ['nw.sym.7z']
target['output'] = ''.join([package_name, '-symbol-',
'v', version,
'-', platform_name,
'-', arch, '.7z'])
elif platform_name == 'osx':
target['compress'] = 'zip'
target['input'] = [
'nwjs.breakpad.tar'
]
target['folder'] = True
else:
print 'Unsupported platform: ' + platform_name
exit(-1)
return target
def generate_target_headers(platform_name, arch, version):
# here, call make_nw_header tool to generate headers
# then, move to binaries_location
target = {}
target['output'] = ''
target['compress'] = None
if platform_name == 'osx':
target['input'] = []
# here , call make-nw-headers.py to generate nw headers
make_nw_header = os.path.join(os.path.dirname(__file__), \
'make-nw-headers.py')
print make_nw_header
res = call(['python', make_nw_header])
if res == 0:
print 'nw-headers generated'
nw_headers_name = 'nw-headers-v' + version + '.tar.gz'
nw_headers_path = os.path.join(os.path.dirname(__file__), \
os.pardir, 'tmp', nw_headers_name)
if os.path.isfile(os.path.join(binaries_location, nw_headers_name)):
os.remove(os.path.join(binaries_location, nw_headers_name))
shutil.move(nw_headers_path, binaries_location)
target['input'].append(nw_headers_name)
else:
#TODO, handle err
print 'nw-headers generate failed'
elif platform_name == 'win':
target['input'] = []
elif platform_name == 'linux':
target['input'] = []
else:
print 'Unsupported platform: ' + platform_name
exit(-1)
return target
def generate_target_empty(platform_name, arch, version):
target = {}
target['output'] = ''
target['compress'] = None
if platform_name == 'win':
target['input'] = []
elif platform_name == 'linux' :
target['input'] = []
else:
target['input'] = []
return target
def generate_target_others(platform_name, arch, version):
target = {}
target['output'] = ''
target['compress'] = None
if platform_name == 'win':
target['input'] = ['nw.exp', 'nw.lib']
elif platform_name == 'linux' :
target['input'] = []
else:
target['input'] = []
return target
################################
# Make packages
def compress(from_dir, to_dir, fname, compress):
from_dir = os.path.normpath(from_dir)
to_dir = os.path.normpath(to_dir)
_from = os.path.join(from_dir, fname)
_to = os.path.join(to_dir, fname)
if compress == 'zip':
z = zipfile.ZipFile(_to + '.zip', 'w', compression=zipfile.ZIP_DEFLATED)
if os.path.isdir(_from):
for root, dirs, files in os.walk(_from):
for f in files:
_path = os.path.join(root, f)
z.write(_path, _path.replace(from_dir+os.sep, ''))
else:
z.write(_from, fname)
z.close()
elif compress == 'tar.gz': # only for folders
if not os.path.isdir(_from):
print 'Will not create tar.gz for a single file: ' + _from
exit(-1)
with tarfile.open(_to + '.tar.gz', 'w:gz') as tar:
tar.add(_from, arcname=os.path.basename(_from))
elif compress == 'gz': # only for single file
if os.path.isdir(_from):
print 'Will not create gz for a folder: ' + _from
exit(-1)
f_in = open(_from, 'rb')
f_out = gzip.open(_to + '.gz', 'wb')
f_out.writelines(f_in)
f_out.close()
f_in.close()
else:
print 'Unsupported compression format: ' + compress
exit(-1)
def make_packages(targets):
# check file existance
for t in targets:
for f in t['input']:
src = os.path.join(binaries_location, f)
if not os.path.exists(src):
print 'File does not exist: ', src
exit(-1)
# clear the output folder
if os.path.exists(dist_dir):
if not os.path.isdir(dist_dir):
print 'Invalid path: ' + dist_dir
exit(-1)
else:
shutil.rmtree(dist_dir)
# now let's do it
os.mkdir(dist_dir)
for t in targets:
if len(t['input']) == 0:
continue
if t['compress'] == None:
for f in t['input']:
src = os.path.join(binaries_location, f)
if t['output'] != '':
dest = os.path.join(dist_dir, t['output'])
else:
dest = os.path.join(dist_dir, f)
print "Copying " + f
shutil.copy(src, dest)
elif (t.has_key('folder') and t['folder'] == True) or len(t['input']) > 1:
print 'Making "' + t['output'] + '.' + t['compress'] + '"'
# copy files into a folder then pack
folder = os.path.join(dist_dir, t['output'])
os.mkdir(folder)
for f in t['input']:
src = os.path.join(binaries_location, f)
dest = os.path.join(folder, f)
if os.path.isdir(src): # like nw.app
shutil.copytree(src, dest)
else:
shutil.copy(src, dest)
compress(dist_dir, dist_dir, t['output'], t['compress'])
# remove temp folders
shutil.rmtree(folder)
else:
# single file
print 'Making "' + t['output'] + '.' + t['compress'] + '"'
compress(binaries_location, dist_dir, t['input'][0], t['compress'])
# must be aligned with steps
generators = {}
generators['nw'] = generate_target_nw
generators['chromedriver'] = generate_target_chromedriver
generators['symbol'] = generate_target_symbols
generators['headers'] = generate_target_headers
generators['others'] = generate_target_others
################################
# Process targets
targets = []
for s in steps:
if (step != None) and (s != step):
continue
if (skip != None) and (s == skip):
continue
targets.append(generators[s](platform_name, arch, nw_ver))
print 'Creating packages...'
make_packages(targets)
# vim: et:ts=4:sw=4
| mit |
armpc/repo | subcmds/upload.py | 1 | 10547 | #
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import re
import sys
from command import InteractiveCommand
from editor import Editor
from error import UploadError
UNUSUAL_COMMIT_THRESHOLD = 5
def _ConfirmManyUploads(multiple_branches=False):
if multiple_branches:
print "ATTENTION: One or more branches has an unusually high number of commits."
else:
print "ATTENTION: You are uploading an unusually high number of commits."
print "YOU PROBABLY DO NOT MEAN TO DO THIS. (Did you rebase across branches?)"
answer = raw_input("If you are sure you intend to do this, type 'yes': ").strip()
return answer == "yes"
def _die(fmt, *args):
msg = fmt % args
print >>sys.stderr, 'error: %s' % msg
sys.exit(1)
def _SplitEmails(values):
result = []
for str in values:
result.extend([s.strip() for s in str.split(',')])
return result
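# e.g. _SplitEmails(['a@example.com,b@example.com', 'c@example.com'])
# returns ['a@example.com', 'b@example.com', 'c@example.com']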
class Upload(InteractiveCommand):
common = True
helpSummary = "Upload changes for code review"
helpUsage="""
%prog [--re --cc] [<project>]...
"""
helpDescription = """
The '%prog' command is used to send changes to the Gerrit Code
Review system. It searches for topic branches in local projects
that have not yet been published for review. If multiple topic
branches are found, '%prog' opens an editor to allow the user to
select which branches to upload.
'%prog' searches for uploadable changes in all projects listed at
the command line. Projects can be specified either by name, or by
a relative or absolute path to the project's local directory. If no
projects are specified, '%prog' will search for uploadable changes
in all projects listed in the manifest.
If the --reviewers or --cc options are passed, those emails are
added to the respective list of users, and emails are sent to any
new users. Users passed as --reviewers must already be registered
with the code review system, or the upload will fail.
Configuration
-------------
review.URL.autoupload:
To disable the "Upload ... (y/n)?" prompt, you can set a per-project
or global Git configuration option. If review.URL.autoupload is set
to "true" then repo will assume you always answer "y" at the prompt,
and will not prompt you further. If it is set to "false" then repo
will assume you always answer "n", and will abort.
review.URL.autocopy:
To automatically copy a user or mailing list to all uploaded reviews,
you can set a per-project or global Git option to do so. Specifically,
review.URL.autocopy can be set to a comma separated list of reviewers
who you always want copied on all uploads with a non-empty --re
argument.
review.URL.username:
Override the username used to connect to Gerrit Code Review.
By default the local part of the email address is used.
The URL must match the review URL listed in the manifest XML file,
or in the .git/config within the project. For example:
[remote "origin"]
url = git://git.example.com/project.git
review = http://review.example.com/
[review "http://review.example.com/"]
autoupload = true
autocopy = johndoe@company.com,my-team-alias@company.com
References
----------
Gerrit Code Review: http://code.google.com/p/gerrit/
"""
def _Options(self, p):
p.add_option('-t',
dest='auto_topic', action='store_true',
help='Send local branch name to Gerrit Code Review')
p.add_option('--re', '--reviewers',
type='string', action='append', dest='reviewers',
help='Request reviews from these people.')
p.add_option('--cc',
type='string', action='append', dest='cc',
help='Also send email to these email addresses.')
def _SingleBranch(self, opt, branch, people):
project = branch.project
name = branch.name
remote = project.GetBranch(name).remote
key = 'review.%s.autoupload' % remote.review
answer = project.config.GetBoolean(key)
if answer is False:
_die("upload blocked by %s = false" % key)
if answer is None:
date = branch.date
list = branch.commits
print 'Upload project %s/:' % project.relpath
print ' branch %s (%2d commit%s, %s):' % (
name,
len(list),
len(list) != 1 and 's' or '',
date)
for commit in list:
print ' %s' % commit
sys.stdout.write('to %s (y/n)? ' % remote.review)
answer = sys.stdin.readline().strip()
answer = answer in ('y', 'Y', 'yes', '1', 'true', 't')
if answer:
if len(branch.commits) > UNUSUAL_COMMIT_THRESHOLD:
answer = _ConfirmManyUploads()
if answer:
self._UploadAndReport(opt, [branch], people)
else:
_die("upload aborted by user")
def _MultipleBranches(self, opt, pending, people):
projects = {}
branches = {}
script = []
script.append('# Uncomment the branches to upload:')
for project, avail in pending:
script.append('#')
script.append('# project %s/:' % project.relpath)
b = {}
for branch in avail:
name = branch.name
date = branch.date
list = branch.commits
if b:
script.append('#')
script.append('# branch %s (%2d commit%s, %s):' % (
name,
len(list),
len(list) != 1 and 's' or '',
date))
for commit in list:
script.append('# %s' % commit)
b[name] = branch
projects[project.relpath] = project
branches[project.name] = b
script.append('')
script = Editor.EditString("\n".join(script)).split("\n")
project_re = re.compile(r'^#?\s*project\s*([^\s]+)/:$')
branch_re = re.compile(r'^\s*branch\s*([^\s(]+)\s*\(.*')
project = None
todo = []
for line in script:
m = project_re.match(line)
if m:
name = m.group(1)
project = projects.get(name)
if not project:
_die('project %s not available for upload', name)
continue
m = branch_re.match(line)
if m:
name = m.group(1)
if not project:
_die('project for branch %s not in script', name)
branch = branches[project.name].get(name)
if not branch:
_die('branch %s not in %s', name, project.relpath)
todo.append(branch)
if not todo:
_die("nothing uncommented for upload")
many_commits = False
for branch in todo:
if len(branch.commits) > UNUSUAL_COMMIT_THRESHOLD:
many_commits = True
break
if many_commits:
if not _ConfirmManyUploads(multiple_branches=True):
_die("upload aborted by user")
self._UploadAndReport(opt, todo, people)
def _AppendAutoCcList(self, branch, people):
"""
Appends the list of users in the CC list in the git project's config if a
non-empty reviewer list was found.
"""
name = branch.name
project = branch.project
key = 'review.%s.autocopy' % project.GetBranch(name).remote.review
raw_list = project.config.GetString(key)
if not raw_list is None and len(people[0]) > 0:
people[1].extend([entry.strip() for entry in raw_list.split(',')])
def _FindGerritChange(self, branch):
last_pub = branch.project.WasPublished(branch.name)
if last_pub is None:
return ""
refs = branch.GetPublishedRefs()
try:
# refs/changes/XYZ/N --> XYZ
return refs.get(last_pub).split('/')[-2]
except:
return ""
def _UploadAndReport(self, opt, todo, original_people):
have_errors = False
for branch in todo:
try:
people = copy.deepcopy(original_people)
self._AppendAutoCcList(branch, people)
# Check if there are local changes that may have been forgotten
if branch.project.HasChanges():
key = 'review.%s.autoupload' % branch.project.remote.review
answer = branch.project.config.GetBoolean(key)
# if they want to auto upload, let's not ask because it could be automated
if answer is None:
sys.stdout.write('Uncommitted changes in ' + branch.project.name + ' (did you forget to amend?). Continue uploading? (y/n) ')
a = sys.stdin.readline().strip().lower()
if a not in ('y', 'yes', 't', 'true', 'on'):
print >>sys.stderr, "skipping upload"
branch.uploaded = False
branch.error = 'User aborted'
continue
branch.UploadForReview(people, auto_topic=opt.auto_topic)
branch.uploaded = True
except UploadError, e:
branch.error = e
branch.uploaded = False
have_errors = True
print >>sys.stderr, ''
print >>sys.stderr, '--------------------------------------------'
if have_errors:
for branch in todo:
if not branch.uploaded:
print >>sys.stderr, '[FAILED] %-15s %-15s (%s)' % (
branch.project.relpath + '/',
branch.name,
branch.error)
print >>sys.stderr, ''
for branch in todo:
if branch.uploaded:
print >>sys.stderr, '[OK ] %-15s %s' % (
branch.project.relpath + '/',
branch.name)
if have_errors:
sys.exit(1)
def Execute(self, opt, args):
project_list = self.GetProjects(args)
pending = []
reviewers = []
cc = []
if opt.reviewers:
reviewers = _SplitEmails(opt.reviewers)
if opt.cc:
cc = _SplitEmails(opt.cc)
people = (reviewers,cc)
for project in project_list:
avail = project.GetUploadableBranches()
if avail:
pending.append((project, avail))
if not pending:
print >>sys.stdout, "no branches ready for upload"
elif len(pending) == 1 and len(pending[0][1]) == 1:
self._SingleBranch(opt, pending[0][1][0], people)
else:
self._MultipleBranches(opt, pending, people)
| apache-2.0 |
quarkonics/zstack-woodpecker | zstackwoodpecker/zstackwoodpecker/ansible.py | 2 | 2482 | '''
@author: YYK
'''
import zstacklib.utils.shell as shell
import zstacklib.utils.ssh as ssh
import os.path
import sys
def check_and_install_ansible():
cmd = 'which ansible'
try:
shell.call(cmd)
except:
print('ansible is not installed. Will try to install ansible')
cmd = 'pip install ansible'
shell.call(cmd)
print('ansible is installed successfully')
def enable_ansible_connection(target, username, password, exc_info):
ansible_config = '/etc/ansible/ansible.cfg'
host_config = '/etc/ansible/hosts'
ansible_config_content='''
[defaults]
forks = 100
host_key_checking = False
pipelining = True
'''
add_host_cmd = "grep '^%s$' %s; if [ $? -ne 0 ]; then echo -e '\n%s\n' >> %s; sed -i '/^$/d' %s; fi; " % (target, host_config, target, host_config, host_config)
if not os.path.exists(os.path.dirname(ansible_config)):
os.system('mkdir -p %s' % os.path.dirname(ansible_config))
if not os.path.exists(ansible_config):
open(ansible_config, 'w').write(ansible_config_content)
shell.call(add_host_cmd)
print('Create no ssh password for: %s ' % target)
try:
ssh.make_ssh_no_password(target, username, password)
except Exception as e:
exc_info.append(sys.exc_info())
raise e
def do_ansible(ansible_dir, ansible_cmd, lib_files, exc_info):
'''
If you need to execute ansible_cmd for multiple hosts, the params are like:
-e 'host=HOST1:HOST2:HOST3 other_args'.
If Ansible fails, the files in the lib_files list will be deleted from the
target machine.
'''
print('ansible-playbook -vvvv %s' % ansible_cmd)
try:
print shell.call('cd %s; ansible-playbook -vvvv %s' % (ansible_dir, ansible_cmd))
except Exception as e:
for lib_file in lib_files:
shell.call('/bin/rm -rf /var/lib/zstack/%s' % lib_file)
exc_info.append(sys.exc_info())
raise e
print('Execute ansible command successfully: ansible-playbook %s ' % ansible_cmd)
def execute_ansible(target, username, password, ansible_dir, ansible_cmd, lib_files = [], exc_info = []):
'''
lib_files is a list of the files that will be copied to the target machine
by ansible. Each is usually a file with its parent folder under /var/lib/zstack.
e.g. [ 'testagent/zstacklib-0.1.0.tar.gz' ]
'''
enable_ansible_connection(target, username, password, exc_info)
do_ansible(ansible_dir, ansible_cmd, lib_files, exc_info)
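# A minimal usage sketch (host, credentials, playbook path and arguments are
# hypothetical):
#
# execute_ansible('192.168.0.10', 'root', 'password',
#                 '/usr/local/zstack/ansible',
#                 'testagent.yaml -e "host=192.168.0.10"',
#                 lib_files=['testagent/zstacklib-0.1.0.tar.gz'])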
| apache-2.0 |
hasteur/hasteurbot_task_3 | families/wikitravel_shared_family.py | 4 | 1070 | # -*- coding: utf-8 -*-
__version__ = '$Id$'
import family
# The Wikitravel shared family
class Family(family.Family):
def __init__(self):
family.Family.__init__(self)
self.name = 'wikitravel_shared'
self.langs = {
'wikitravel_shared': 'wikitravel.org',
}
self.namespaces[4] = {
'_default': u'Wikitravel Shared',
}
self.namespaces[5] = {
'_default': u'Wikitravel Shared talk',
}
self.namespaces[6] = {
'_default': u'Image',
}
self.namespaces[7] = {
'_default': u'Image talk',
}
self.namespaces[200] = {
'_default': u'Tech',
}
self.namespaces[201] = {
'_default': u'Tech talk',
}
self.interwiki_forward = 'wikitravel'
def scriptpath(self, code):
return '/wiki/shared'
def shared_image_repository(self, code):
return ('wikitravel_shared', 'wikitravel_shared')
def version(self, code):
return "1.10.1"
| gpl-2.0 |
jasonhamilton/hotwing-core | tests/test_coordinate.py | 1 | 3883 | import pytest
import sys, os
from decimal import Decimal
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath + '/../')
from hotwing_core import Coordinate
class TestCoordinate():
def test_create_coordinate(self):
x = 1
y = 1.4
c = Coordinate(x, y)
assert x == c.x
assert y == c.y
def test_representations(self):
x = 1
y = 1.4
c = Coordinate(x, y)
assert "1.00000, 1.40000" == c.__str__()
assert "Coordinate: 1.00000, 1.40000" == c.__repr__()
def test_equality_comparison(self):
x = 1
y = 1.4
c1 = Coordinate(x, y)
c2 = Coordinate(x, y)
c3 = Coordinate(y, x)
assert c1 == c2
assert not c2 == c3
# test inequality operator too
assert c2 != c3
assert not c1 != c2
# try comparison to non object
with pytest.raises(NotImplementedError):
1 == c1
with pytest.raises(NotImplementedError):
c1 == 1
with pytest.raises(NotImplementedError):
1 != c1
with pytest.raises(NotImplementedError):
c1 != 1
def test_calc_slope(self):
c1 = Coordinate(0, 0)
c2 = Coordinate(1, 1)
s1 = Coordinate.calc_slope(c1,c2)
assert s1 == 1
c1 = Coordinate(0, 0)
c2 = Coordinate(1, 0)
s1 = Coordinate.calc_slope(c1,c2)
assert s1 == 0
# test infinite slope
c1 = Coordinate(0, 0)
c2 = Coordinate(0, 0)
s1 = Coordinate.calc_slope(c1,c2)
assert s1 >= 1000000
def test_calc_dist(self):
c1 = Coordinate(0, 0)
c2 = Coordinate(1, 0)
d = Coordinate.calc_dist(c1,c2)
assert d == 1
c1 = Coordinate(0, 0)
c2 = Coordinate(0, 2)
d = Coordinate.calc_dist(c1,c2)
assert d == 2
c1 = Coordinate(0, 0)
c2 = Coordinate(0, -3)
d = Coordinate.calc_dist(c1,c2)
assert d == 3
c1 = Coordinate(-4, 0)
c2 = Coordinate(0, 0)
d = Coordinate.calc_dist(c1,c2)
assert d == 4
c1 = Coordinate(0, 0)
c2 = Coordinate(3, 4)
d = Coordinate.calc_dist(c1,c2)
assert d == 5
c1 = Coordinate(0, 0)
c2 = Coordinate(0, 0)
d = Coordinate.calc_dist(c1,c2)
assert d == 0
def test_rotate(self):
o = Coordinate(1, 1) # origin to rotate
c = Coordinate(3, 3) # coordinate
a = 90 # angle
e = Coordinate(-1,3) # expected
r = Coordinate.rotate(o,c,a)
assert e == r
o = Coordinate(1, 1)
c = Coordinate(3, 3)
a = 180
e = Coordinate(-1,-1)
r = Coordinate.rotate(o,c,a)
assert e == r
o = Coordinate(1, 1)
c = Coordinate(3, 3)
a = 270
e = Coordinate(3,-1)
r = Coordinate.rotate(o,c,a)
assert e == r
o = Coordinate(1, 1)
c = Coordinate(3, 3)
a = 360
e = c
r = Coordinate.rotate(o,c,a)
assert e == r
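# Sanity check for the first rotate case above, using the standard 2-D
# rotation about an origin (ox, oy):
#   x' = ox + (x - ox)*cos(a) - (y - oy)*sin(a)
#   y' = oy + (x - ox)*sin(a) + (y - oy)*cos(a)
# With (ox, oy) = (1, 1), (x, y) = (3, 3) and a = 90 degrees (cos a = 0,
# sin a = 1): x' = 1 - 2 = -1 and y' = 1 + 2 = 3, matching Coordinate(-1, 3).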
def test_add(self):
c1 = Coordinate(1, 2)
c2 = Coordinate(3, 4)
e = Coordinate(1+3,2+4)
assert c1+c2 == e
with pytest.raises(NotImplementedError):
c1 + 1
def test_subtract(self):
c1 = Coordinate(1, 2)
c2 = Coordinate(3, 4)
e = Coordinate(1-3,2-4)
assert c1-c2 == e
with pytest.raises(NotImplementedError):
c1 - 1
def test_multiply(self):
c1 = Coordinate(0, 1)
c2 = Coordinate(2, 3)
assert c1*c2 == Coordinate(0*2,1*3)
# test with numeric objects
assert c1*2 == Coordinate(0*2,1*2)
assert c1*2 == Coordinate(0*float(2),1*float(2))
assert c1*2 == Coordinate(0*Decimal(2),1*Decimal(2))
| gpl-3.0 |
SebastianMerz/calalert | Server/venv/lib/python2.7/site-packages/unittest2/runner.py | 164 | 6757 | """Running tests"""
import sys
import time
import unittest
from unittest2 import result
try:
from unittest2.signals import registerResult
except ImportError:
def registerResult(_):
pass
__unittest = True
class _WritelnDecorator(object):
"""Used to decorate file-like objects with a handy 'writeln' method"""
def __init__(self,stream):
self.stream = stream
def __getattr__(self, attr):
if attr in ('stream', '__getstate__'):
raise AttributeError(attr)
return getattr(self.stream,attr)
def writeln(self, arg=None):
if arg:
self.write(arg)
self.write('\n') # text-mode streams translate to \r\n if needed
class TextTestResult(result.TestResult):
"""A test result class that can print formatted text results to a stream.
Used by TextTestRunner.
"""
separator1 = '=' * 70
separator2 = '-' * 70
def __init__(self, stream, descriptions, verbosity):
super(TextTestResult, self).__init__()
self.stream = stream
self.showAll = verbosity > 1
self.dots = verbosity == 1
self.descriptions = descriptions
def getDescription(self, test):
doc_first_line = test.shortDescription()
if self.descriptions and doc_first_line:
return '\n'.join((str(test), doc_first_line))
else:
return str(test)
def startTest(self, test):
super(TextTestResult, self).startTest(test)
if self.showAll:
self.stream.write(self.getDescription(test))
self.stream.write(" ... ")
self.stream.flush()
def addSuccess(self, test):
super(TextTestResult, self).addSuccess(test)
if self.showAll:
self.stream.writeln("ok")
elif self.dots:
self.stream.write('.')
self.stream.flush()
def addError(self, test, err):
super(TextTestResult, self).addError(test, err)
if self.showAll:
self.stream.writeln("ERROR")
elif self.dots:
self.stream.write('E')
self.stream.flush()
def addFailure(self, test, err):
super(TextTestResult, self).addFailure(test, err)
if self.showAll:
self.stream.writeln("FAIL")
elif self.dots:
self.stream.write('F')
self.stream.flush()
def addSkip(self, test, reason):
super(TextTestResult, self).addSkip(test, reason)
if self.showAll:
self.stream.writeln("skipped %r" % (reason,))
elif self.dots:
self.stream.write("s")
self.stream.flush()
def addExpectedFailure(self, test, err):
super(TextTestResult, self).addExpectedFailure(test, err)
if self.showAll:
self.stream.writeln("expected failure")
elif self.dots:
self.stream.write("x")
self.stream.flush()
def addUnexpectedSuccess(self, test):
super(TextTestResult, self).addUnexpectedSuccess(test)
if self.showAll:
self.stream.writeln("unexpected success")
elif self.dots:
self.stream.write("u")
self.stream.flush()
def printErrors(self):
if self.dots or self.showAll:
self.stream.writeln()
self.printErrorList('ERROR', self.errors)
self.printErrorList('FAIL', self.failures)
def printErrorList(self, flavour, errors):
for test, err in errors:
self.stream.writeln(self.separator1)
self.stream.writeln("%s: %s" % (flavour, self.getDescription(test)))
self.stream.writeln(self.separator2)
self.stream.writeln("%s" % err)
def stopTestRun(self):
super(TextTestResult, self).stopTestRun()
self.printErrors()
class TextTestRunner(unittest.TextTestRunner):
"""A test runner class that displays results in textual form.
It prints out the names of tests as they are run, errors as they
occur, and a summary of the results at the end of the test run.
"""
resultclass = TextTestResult
def __init__(self, stream=sys.stderr, descriptions=True, verbosity=1,
failfast=False, buffer=False, resultclass=None):
self.stream = _WritelnDecorator(stream)
self.descriptions = descriptions
self.verbosity = verbosity
self.failfast = failfast
self.buffer = buffer
if resultclass is not None:
self.resultclass = resultclass
def _makeResult(self):
return self.resultclass(self.stream, self.descriptions, self.verbosity)
def run(self, test):
"Run the given test case or test suite."
result = self._makeResult()
result.failfast = self.failfast
result.buffer = self.buffer
registerResult(result)
startTime = time.time()
startTestRun = getattr(result, 'startTestRun', None)
if startTestRun is not None:
startTestRun()
try:
test(result)
finally:
stopTestRun = getattr(result, 'stopTestRun', None)
if stopTestRun is not None:
stopTestRun()
else:
result.printErrors()
stopTime = time.time()
timeTaken = stopTime - startTime
if hasattr(result, 'separator2'):
self.stream.writeln(result.separator2)
run = result.testsRun
self.stream.writeln("Ran %d test%s in %.3fs" %
(run, run != 1 and "s" or "", timeTaken))
self.stream.writeln()
expectedFails = unexpectedSuccesses = skipped = 0
try:
results = map(len, (result.expectedFailures,
result.unexpectedSuccesses,
result.skipped))
expectedFails, unexpectedSuccesses, skipped = results
except AttributeError:
pass
infos = []
if not result.wasSuccessful():
self.stream.write("FAILED")
failed, errored = map(len, (result.failures, result.errors))
if failed:
infos.append("failures=%d" % failed)
if errored:
infos.append("errors=%d" % errored)
else:
self.stream.write("OK")
if skipped:
infos.append("skipped=%d" % skipped)
if expectedFails:
infos.append("expected failures=%d" % expectedFails)
if unexpectedSuccesses:
infos.append("unexpected successes=%d" % unexpectedSuccesses)
if infos:
self.stream.writeln(" (%s)" % (", ".join(infos),))
else:
self.stream.write("\n")
return result
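# A minimal usage sketch (the test case is hypothetical):
#
# import unittest2
#
# class ExampleTest(unittest2.TestCase):
#     def test_truth(self):
#         self.assertTrue(True)
#
# suite = unittest2.TestLoader().loadTestsFromTestCase(ExampleTest)
# TextTestRunner(verbosity=2).run(suite)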
| gpl-2.0 |
elifeasley/metacademy-application | content_server/concepts.py | 1 | 5579 | import random
import resources
import string
import work_estimates
ID_LENGTH = 8
class Dependency:
"""A struct representing a dependency on another concept."""
def __init__(self, tag, reason, shortcut):
self.tag = tag
self.reason = reason
assert type(shortcut) == int
self.shortcut = shortcut
def compute_outlinks(tag, db):
child_tags = set([t for _, t in db.graph.outgoing['concept', tag]])
if ('shortcut', tag) in db.graph.outgoing:
child_tags.update([t for _, t in db.graph.outgoing['shortcut', tag]])
result = []
for t in child_tags:
reason = None
for d in db.nodes[t].dependencies:
if d.tag == tag and d.reason:
reason = d.reason
if reason:
result.append({'from_tag': tag, 'to_tag': t, 'reason': reason})
else:
result.append({'from_tag': tag, 'to_tag': t})
return result
class Concept:
"""A struct containing the information relevant to a single concept node.
tag -- a shorthand form to reference a node from elsewhere in the graph
title -- the title which will be displayed to the user
summary -- brief summary of the node's concept
dependencies -- a list of Dependency objects giving the immediate dependencies
pointers -- a list of Pointer objects representing the see-also links
"""
def __init__(self, tag, id, title, summary, goals, dependencies, pointers, resources, questions, flags):
self.tag = tag
self.id = id
self.title = title
self.summary = summary
self.goals = goals
self.dependencies = dependencies
self.pointers = pointers
self.resources = resources
self.questions = questions
self.flags = flags
def copy(self):
return Concept(self.tag, self.id, self.title, self.summary, [p.copy() for p in self.goals], list(self.dependencies),
[p.copy() for p in self.pointers], list(self.resources), list(self.questions), list(self.flags))
def json_repr(self, db):
res = [resources.json_repr(resources.add_defaults(r, db.resources), db) for r in self.resources]
outlinks = compute_outlinks(self.tag, db)
dependencies = [{'from_tag': dep.tag, 'to_tag': self.tag, 'reason': dep.reason}
for dep in self.dependencies]
pointers = [p.json_repr(db.nodes) for p in self.pointers]
goals = [p.json_repr(db.nodes) for p in self.goals]
flags = [db.flags[f] for f in self.flags if f in db.flags]
d = {'tag': self.tag,
'title': self.title,
'id': self.id,
'summary': self.summary,
'goals': goals,
'pointers': pointers,
'dependencies': dependencies,
'resources': res,
'questions': self.questions,
'outlinks': outlinks,
'is_shortcut': 0,
'flags': flags,
}
if self.tag in db.concept_times:
d['time'] = max(db.concept_times[self.tag], work_estimates.MIN_TIME)
return d
def get_resource_keys(self):
keys = None
if self.resources:
keys = [rdic['source'] for rdic in self.resources]
return keys
class Shortcut:
"""A struct contatining the information about a shortcut for a concept node.
concept -- the Concept instance which this is a shortcut to
dependencies -- a list of Dependency objects
resources -- a list of resource dicts
questions -- a list of comprehension questions
A requirement for the graph is that the shortcut dependencies be a subset of the
dependencies for the node itself.
"""
def __init__(self, concept, goals, dependencies, resources, questions):
self.concept = concept
self.goals = goals
self.dependencies = dependencies
self.resources = resources
self.questions = questions
def copy(self):
return Shortcut(self.concept.copy(), [p.copy() for p in self.goals], list(self.dependencies), list(self.resources))
def json_repr(self, db):
res = [resources.json_repr(resources.add_defaults(r, db.resources), db) for r in self.resources]
outlinks = compute_outlinks(self.concept.tag, db)
dependencies = [{'from_tag': dep.tag, 'to_tag': self.concept.tag, 'reason': dep.reason}
for dep in self.dependencies]
pointers = [p.json_repr(db.nodes) for p in self.concept.pointers]
goals = [p.json_repr(db.nodes) for p in self.goals]
flags = [db.flags[f] for f in self.concept.flags if f in db.flags]
d = {'tag': self.concept.tag,
'title': self.concept.title,
'id': self.concept.id,
'summary': self.concept.summary,
'goals': goals,
'pointers': pointers,
'dependencies': dependencies,
'resources': res,
'questions': self.questions,
'outlinks': outlinks,
'is_shortcut': 1,
'flags': flags,
}
if self.concept.tag in db.shortcut_times:
d['time'] = max(db.shortcut_times[self.concept.tag], work_estimates.MIN_TIME)
return d
def random_id():
"""Generate a random ID for a concept. The IDs are arbitrary, apart from the requirement that they be distinct."""
return ''.join([random.choice(string.lowercase + string.digits) for i in range(ID_LENGTH)])
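# Illustrative only: random_id() returns an 8-character string drawn from
# lowercase letters and digits, e.g. something like 'a3k9x0qz'. Distinctness
# is not guaranteed by construction, so the caller is presumably responsible
# for checking new IDs against existing ones.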
| gpl-3.0 |
dazhaoniel/1kg-more | js1kg/message/views.py | 1 | 2298 | from django.shortcuts import render
from django.utils import timezone
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from django.template import RequestContext
from guardian.decorators import permission_required_or_403
from .models import MessageThread, Message
from .forms import MessageForm
from js1kg.project.models import OrgAdmin
@login_required
def messages(request):
latest_threads = MessageThread.objects.filter(Q(messages__from_user=request.user) | Q(messages__to_user=request.user)).order_by('-messages__send_time')#.distinct('id')
context = RequestContext(request, {
'latest_messages': latest_threads,
})
return render(request, 'message/messages.html', context)
@permission_required_or_403('can_view_message', (MessageThread, 'pk', 'pk'))
@login_required
def message_thread(request, pk):
thread_id = MessageThread.objects.get(id=pk)
threads = Message.objects.filter(thread_id=pk).order_by('-send_time')
related_project = MessageThread.objects.get(id=pk)
if threads[0].from_user == request.user:
other_user = threads[0].to_user
else:
other_user = threads[0].from_user
admin_position = OrgAdmin.objects.get(user=other_user)
if request.method == 'POST':
form = MessageForm(request.POST)
if form.is_valid():
message = form.save(commit=False)
message.from_user=request.user
message.to_user=other_user
message.send_time=timezone.now()
message.thread=thread_id
message.first_in_thread=False
message.save()
form.save_m2m()
# form = ThreadMessageForm(from_user=request.user, to_user=other_user, send_time=timezone.now(), thread_id=thread_id, data=request.POST)
# if form.is_valid():
# form.save()
# # form = ThreadMessageForm(request.POST)
else:
form = MessageForm()
context = RequestContext(request, {
'latest_messages': threads,
'other_user': other_user,
'related_project': related_project.related_project,
'admin_position': admin_position,
'form': form,
})
return render(request, 'message/messages_thread.html', context)
| apache-2.0 |
alrusdi/lettuce | tests/integration/lib/Django-1.3/tests/regressiontests/null_fk_ordering/models.py | 92 | 1357 | """
Regression tests for proper working of ForeignKey(null=True). Tests these bugs:
* #7512: including a nullable foreign key reference in Meta ordering has
unexpected results
"""
from django.db import models
# The first two models represent a very simple null FK ordering case.
class Author(models.Model):
name = models.CharField(max_length=150)
class Article(models.Model):
title = models.CharField(max_length=150)
author = models.ForeignKey(Author, null=True)
def __unicode__(self):
return u'Article titled: %s' % (self.title, )
class Meta:
ordering = ['author__name', ]
# These following 4 models represent a far more complex ordering case.
class SystemInfo(models.Model):
system_name = models.CharField(max_length=32)
class Forum(models.Model):
system_info = models.ForeignKey(SystemInfo)
forum_name = models.CharField(max_length=32)
class Post(models.Model):
forum = models.ForeignKey(Forum, null=True)
title = models.CharField(max_length=32)
def __unicode__(self):
return self.title
class Comment(models.Model):
post = models.ForeignKey(Post, null=True)
comment_text = models.CharField(max_length=250)
class Meta:
ordering = ['post__forum__system_info__system_name', 'comment_text']
def __unicode__(self):
return self.comment_text
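# A minimal sketch of the behaviour under test (assuming the models above are
# installed): ordering that traverses nullable FKs should use LEFT OUTER
# JOINs, so comments whose post/forum chain is NULL still appear.
#
# comments = Comment.objects.all()
# # Expected ordering: system_name (NULLs included), then comment_text.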
| gpl-3.0 |
javifo/SM-G920F-kernel | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
cneill/barbican | barbican/tests/api/controllers/test_acls.py | 2 | 43919 | # Copyright (c) 2015 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import uuid
from barbican.api.controllers import acls
from barbican.model import repositories
from barbican.tests.api import test_resources_policy as test_policy
from barbican.tests import utils
class TestACLsWithContextMixin(test_policy.BaseTestCase):
"""Mixin for performing common acls operation used with policy logic."""
def _create_secret_with_creator_user(self, app, creator_user_id):
# define creator user for new secret entry.
app.extra_environ = {
'barbican.context': self._build_context(self.project_id,
user=creator_user_id)
}
secret_id, _ = create_secret(app)
return secret_id
def _create_container_with_creator_user(self, app, creator_user_id):
# define creator user for new container entry.
app.extra_environ = {
'barbican.context': self._build_context(self.project_id,
user=creator_user_id)
}
container_id, _ = create_container(app)
return container_id
def _set_acls_with_context(self, app, entity_type=None, op_type=None,
entity_id=None, roles=None, user=None,
enforce_policy=True, expect_errors=False):
"""Perform acl create/update/delete operation with policy logic.
Before performing acl create/update/delete, the provided input is used
to set a custom barbican context. The operation is done under policy
enforcement logic.
"""
policy_enforcer = self.policy_enforcer if enforce_policy else None
app.extra_environ = {
'barbican.context': self._build_context(
self.project_id, roles=roles, user=user,
is_admin=False, policy_enforcer=policy_enforcer)
}
resp = None
if op_type == 'create':
resp = create_acls(app, entity_type, entity_id,
read_user_ids=['u1', 'u2'],
expect_errors=expect_errors)
elif op_type == 'update':
resp = update_acls(app, entity_type, entity_id,
read_user_ids=['u1', 'u2'],
partial_update=True,
expect_errors=expect_errors)
elif op_type == 'delete':
resp = app.delete('/{0}/{1}/acl'.format(entity_type, entity_id),
expect_errors=expect_errors)
return resp
class WhenTestingSecretACLsResource(utils.BarbicanAPIBaseTestCase,
TestACLsWithContextMixin):
def test_can_create_new_secret_acls(self):
"""Create secret acls and compare stored values with request data."""
secret_uuid, _ = create_secret(self.app)
resp = create_acls(
self.app, 'secrets', secret_uuid,
read_user_ids=['u1', 'u2'])
self.assertEqual(200, resp.status_int)
self.assertIsNotNone(resp.json)
self.assertIn('/secrets/{0}/acl'.format(secret_uuid),
resp.json['acl_ref'])
acl_map = _get_acl_map(secret_uuid, is_secret=True)
# Check project_access is True when not provided
self.assertTrue(acl_map['read']['project_access'])
def test_who_can_create_new_secret_acls(self):
"""Test who can create new secret ACLs as per policy rules.
New secret ACLs can be created by user who created the secret.
Other user with 'creator' role in secret project cannot create ACL
if user is not creator of the secret.
User with 'admin' role in secret project can create ACL for that
secret.
"""
creator_user_id = 'creatorUserId'
secret_uuid = self._create_secret_with_creator_user(
self.app, creator_user_id)
secret_uuid2 = self._create_secret_with_creator_user(
self.app, creator_user_id)
resp = self._set_acls_with_context(
self.app, entity_type='secrets', op_type='create',
entity_id=secret_uuid, roles=['creator'], user='NotSecretCreator',
expect_errors=True)
self.assertEqual(resp.status_int, 403)
resp = self._set_acls_with_context(
self.app, entity_type='secrets', op_type='create',
entity_id=secret_uuid, roles=['creator'],
user=creator_user_id, expect_errors=False)
self.assertEqual(resp.status_int, 200)
# test for user with 'admin' role in secret project
resp = self._set_acls_with_context(
self.app, entity_type='secrets', op_type='create',
entity_id=secret_uuid2, roles=['admin'], user='AdminUser',
expect_errors=False)
self.assertEqual(resp.status_int, 200)
def test_create_new_secret_acls_with_project_access_false(self):
"""Should allow creating acls for a new secret with project-access."""
secret_uuid, _ = create_secret(self.app)
resp = create_acls(
self.app, 'secrets', secret_uuid,
read_project_access=False)
self.assertEqual(200, resp.status_int)
self.assertIsNotNone(resp.json)
self.assertIn('/secrets/{0}/acl'.format(secret_uuid),
resp.json['acl_ref'])
acl_map = _get_acl_map(secret_uuid, is_secret=True)
self.assertFalse(acl_map['read']['project_access'])
def test_new_secret_acls_with_invalid_project_access_value_should_fail(
self):
"""Should fail if project-access flag is provided as string value."""
secret_uuid, _ = create_secret(self.app)
resp = create_acls(
self.app, 'secrets', secret_uuid,
read_project_access="False",
read_user_ids=['u1', 'u3', 'u4'],
expect_errors=True)
self.assertEqual(400, resp.status_int)
resp = create_acls(
self.app, 'secrets', secret_uuid,
read_project_access="None",
expect_errors=True)
self.assertEqual(400, resp.status_int)
def test_get_secret_acls_with_complete_acl_data(self):
"""Read existing acls for a with complete acl data."""
secret_id, _ = create_secret(self.app)
create_acls(
self.app, 'secrets', secret_id,
read_user_ids=['u1', 'u3'], read_project_access=False)
resp = self.app.get(
'/secrets/{0}/acl'.format(secret_id),
expect_errors=False)
self.assertEqual(200, resp.status_int)
self.assertIsNotNone(resp.json)
self.assertIn('read', resp.json)
self.assertFalse(resp.json['read']['project-access'])
self.assertIsNotNone(resp.json['read']['created'])
self.assertIsNotNone(resp.json['read']['updated'])
self.assertEqual(set(['u1', 'u3']), set(resp.json['read']['users']))
def test_get_secret_acls_with_project_access_data(self):
"""Read existing acls for acl when only project-access flag is set."""
secret_id, _ = create_secret(self.app)
create_acls(
self.app, 'secrets', secret_id,
read_project_access=False)
resp = self.app.get(
'/secrets/{0}/acl'.format(secret_id),
expect_errors=False)
self.assertEqual(200, resp.status_int)
self.assertIsNotNone(resp.json)
self.assertEqual([], resp.json['read']['users'])
self.assertFalse(resp.json['read']['project-access'])
self.assertIsNotNone(resp.json['read']['created'])
self.assertIsNotNone(resp.json['read']['updated'])
def test_get_secret_acls_invalid_secret_should_fail(self):
"""Get secret acls should fail for invalid secret id.
This test applies to all secret ACLs methods as secret entity is
populated in same manner for get, put, patch, delete methods.
"""
secret_id, _ = create_secret(self.app)
create_acls(
self.app, 'secrets', secret_id,
read_project_access=True,
read_user_ids=['u1', 'u3', 'u4'])
resp = self.app.get(
'/secrets/{0}/acl'.format(uuid.uuid4().hex),
expect_errors=True)
self.assertEqual(404, resp.status_int)
def test_get_secret_acls_no_acls_defined_return_default_acl(self):
"""Get secret acls should pass when no acls defined for a secret."""
secret_id, _ = create_secret(self.app)
resp = self.app.get(
'/secrets/{0}/acl'.format(secret_id),
expect_errors=True)
self.assertEqual(200, resp.status_int)
self.assertEqual(acls.DEFAULT_ACL, resp.json)
def test_get_secret_acls_with_incorrect_uri_should_fail(self):
"""Get secret acls should fail when no acls defined for a secret."""
secret_id, _ = create_secret(self.app)
resp = self.app.get(
'/secrets/{0}/incorrect_acls'.format(secret_id),
expect_errors=True)
self.assertEqual(405, resp.status_int)
def test_full_update_secret_acls_modify_project_access_value(self):
"""ACLs full update with userids where project-access flag modified."""
secret_uuid, _ = create_secret(self.app)
create_acls(
self.app, 'secrets', secret_uuid,
read_user_ids=['u1', 'u2'],
read_project_access=False)
# update acls with no user input so it should delete existing users
resp = update_acls(
self.app, 'secrets', secret_uuid, partial_update=False,
read_project_access=True)
self.assertEqual(200, resp.status_int)
self.assertIsNotNone(resp.json)
self.assertIn('/secrets/{0}/acl'.format(secret_uuid),
resp.json['acl_ref'])
acl_map = _get_acl_map(secret_uuid, is_secret=True)
self.assertTrue(acl_map['read']['project_access'])
self.assertIsNone(acl_map['read'].to_dict_fields().get('users'))
def test_full_update_secret_acls_modify_users_only(self):
"""ACLs full update where specific operation acl is modified."""
secret_uuid, _ = create_secret(self.app)
create_acls(
self.app, 'secrets', secret_uuid,
read_user_ids=['u1', 'u2'], read_project_access=False)
resp = update_acls(
self.app, 'secrets', secret_uuid, partial_update=False,
read_user_ids=['u1', 'u3', 'u5'])
self.assertEqual(200, resp.status_int)
self.assertIsNotNone(resp.json)
self.assertIn('/secrets/{0}/acl'.format(secret_uuid),
resp.json['acl_ref'])
acl_map = _get_acl_map(secret_uuid, is_secret=True)
self.assertTrue(acl_map['read']['project_access'])
self.assertNotIn('u2', acl_map['read'].to_dict_fields()['users'])
self.assertEqual(set(['u1', 'u3', 'u5']),
set(acl_map['read'].to_dict_fields()['users']))
def test_full_update_secret_acls_with_read_users_only(self):
"""Acls full update where specific operation acl is modified."""
secret_uuid, _ = create_secret(self.app)
create_acls(
self.app, 'secrets', secret_uuid,
read_user_ids=['u1', 'u2'])
acl_map = _get_acl_map(secret_uuid, is_secret=True)
# ACL api does not support 'list' operation so making direct db update
# in acl operation data to make sure full update removes this existing
# ACL.
secret_acl = acl_map['read']
secret_acl.operation = 'list'
secret_acl.save()
acl_map = _get_acl_map(secret_uuid, is_secret=True)
# check 'list' operation is there in db
self.assertIn('list', acl_map)
resp = update_acls(
self.app, 'secrets', secret_uuid, partial_update=False,
read_user_ids=['u1', 'u3', 'u5'])
self.assertEqual(200, resp.status_int)
self.assertIsNotNone(resp.json)
self.assertIn('/secrets/{0}/acl'.format(secret_uuid),
resp.json['acl_ref'])
acl_map = _get_acl_map(secret_uuid, is_secret=True)
# make sure 'list' operation is no longer after full update
self.assertNotIn('list', acl_map)
self.assertTrue(acl_map['read']['project_access'])
self.assertEqual(set(['u1', 'u3', 'u5']),
set(acl_map['read'].to_dict_fields()['users']))
self.assertNotIn('u2', acl_map['read'].to_dict_fields()['users'])
def test_partial_update_secret_acls_with_read_users_only(self):
"""Acls update where specific operation acl is modified."""
secret_uuid, _ = create_secret(self.app)
create_acls(
self.app, 'secrets', secret_uuid,
read_user_ids=['u1', 'u2'])
acl_map = _get_acl_map(secret_uuid, is_secret=True)
secret_acl = acl_map['read']
secret_acl.operation = 'list'
secret_acl.save()
acl_map = _get_acl_map(secret_uuid, is_secret=True)
# check 'list' operation is there in db
self.assertIn('list', acl_map)
resp = update_acls(
self.app, 'secrets', secret_uuid, partial_update=True,
read_user_ids=['u1', 'u3', 'u5'])
self.assertEqual(200, resp.status_int)
self.assertIsNotNone(resp.json)
self.assertIn('/secrets/{0}/acl'.format(secret_uuid),
resp.json['acl_ref'])
acl_map = _get_acl_map(secret_uuid, is_secret=True)
# For partial update, the existing ACL for the other operation is not touched.
self.assertIn('list', acl_map)
self.assertEqual(set(['u1', 'u2']),
set(acl_map['list'].to_dict_fields()['users']))
self.assertTrue(acl_map['read']['project_access'])
self.assertEqual(set(['u1', 'u3', 'u5']),
set(acl_map['read'].to_dict_fields()['users']))
def test_partial_update_secret_acls_when_no_acls_defined_should_pass(self):
"""Acls partial update pass when no acls are defined for a secret.
Partial update (PATCH) is applicable even when no explicit ACL has been
set as by default every secret has implicit acl definition. If PUT
is used, then new ACL is created instead.
"""
secret_id, _ = create_secret(self.app)
resp = update_acls(
self.app, 'secrets', secret_id, partial_update=True,
read_user_ids=['u1', 'u3', 'u5'], expect_errors=False)
self.assertEqual(200, resp.status_int)
acl_map = _get_acl_map(secret_id, is_secret=True)
self.assertTrue(acl_map['read']['project_access'])
def test_who_can_update_secret_acls(self):
"""Test PATCH update existing secret ACLs as per policy rules.
Existing secret ACLs can be updated by user who created the secret.
Other user with 'creator' role in secret project cannot update ACL
if user is not creator of the secret.
User with 'admin' role in secret project can update ACL for that
secret.
"""
creator_user_id = 'creatorUserId'
secret_uuid = self._create_secret_with_creator_user(
self.app, creator_user_id)
self._set_acls_with_context(
self.app, entity_type='secrets', op_type='create',
entity_id=secret_uuid, enforce_policy=False)
resp = self._set_acls_with_context(
self.app, entity_type='secrets', op_type='update',
entity_id=secret_uuid, roles=['creator'], user='NotSecretCreator',
expect_errors=True)
self.assertEqual(resp.status_int, 403)
resp = self._set_acls_with_context(
self.app, entity_type='secrets', op_type='update',
entity_id=secret_uuid, roles=['creator'],
user=creator_user_id)
self.assertEqual(resp.status_int, 200)
# test for user with 'admin' role in secret project
resp = self._set_acls_with_context(
self.app, entity_type='secrets', op_type='update',
entity_id=secret_uuid, roles=['admin'], user='AdminUser')
self.assertEqual(resp.status_int, 200)
def test_partial_update_secret_acls_modify_project_access_values(self):
"""Acls partial update where project-access flag is modified."""
secret_uuid, _ = create_secret(self.app)
create_acls(
self.app, 'secrets', secret_uuid,
read_user_ids=['u1', 'u2'],
read_project_access=False)
resp = update_acls(
self.app, 'secrets', secret_uuid, partial_update=True,
read_project_access=True)
self.assertEqual(200, resp.status_int)
self.assertIsNotNone(resp.json)
self.assertIn('/secrets/{0}/acl'.format(secret_uuid),
resp.json['acl_ref'])
acl_map = _get_acl_map(secret_uuid, is_secret=True)
self.assertTrue(acl_map['read']['project_access'])
self.assertEqual(set(['u1', 'u2']),
set(acl_map['read'].to_dict_fields()['users']))
def test_delete_secret_acls_with_valid_secret_id(self):
"""Delete existing acls for a given secret."""
secret_id, _ = create_secret(self.app)
create_acls(
self.app, 'secrets', secret_id,
read_project_access=True)
resp = self.app.delete(
'/secrets/{0}/acl'.format(secret_id),
expect_errors=False)
content = resp.json
self.assertIsNone(content) # make sure there is no response
self.assertEqual(200, resp.status_int)
acl_map = _get_acl_map(secret_id, is_secret=True)
self.assertFalse(acl_map)
def test_delete_secret_acls_no_acl_defined_should_pass(self):
"""Delete acls should pass when no acls are defined for a secret."""
secret_id, _ = create_secret(self.app)
resp = self.app.delete(
'/secrets/{0}/acl'.format(secret_id),
expect_errors=False)
self.assertEqual(200, resp.status_int)
def test_who_can_delete_secret_acls(self):
"""Test who can delete existing secret ACLs as per policy rules.
Existing secret ACLs can be deleted by user who created the secret.
Other user with 'creator' role in secret project cannot delete ACL
if user is not creator of the secret.
User with 'admin' role in secret project can delete ACL for that
secret.
"""
creator_user_id = 'creatorUserId'
secret_uuid = self._create_secret_with_creator_user(
self.app, creator_user_id)
self._set_acls_with_context(
self.app, entity_type='secrets', op_type='create',
entity_id=secret_uuid, enforce_policy=False)
resp = self._set_acls_with_context(
self.app, entity_type='secrets', op_type='delete',
entity_id=secret_uuid, roles=['creator'], user='NotSecretCreator',
expect_errors=True)
self.assertEqual(resp.status_int, 403)
resp = self._set_acls_with_context(
self.app, entity_type='secrets', op_type='delete',
entity_id=secret_uuid, roles=['creator'],
user=creator_user_id)
self.assertEqual(resp.status_int, 200)
# Create new secret ACLs again.
self._set_acls_with_context(
self.app, entity_type='secrets', op_type='create',
entity_id=secret_uuid, enforce_policy=False)
# test for user with 'admin' role in secret project
resp = self._set_acls_with_context(
self.app, entity_type='secrets', op_type='delete',
entity_id=secret_uuid, roles=['admin'],
user='AdminUser')
self.assertEqual(resp.status_int, 200)
def test_invoke_secret_acls_head_should_fail(self):
"""Should fail as put request to secret acls URI is not supported."""
secret_id, _ = create_secret(self.app)
resp = self.app.head(
'/secrets/{0}/acl'.format(secret_id),
expect_errors=True)
self.assertEqual(405, resp.status_int)
class WhenTestingContainerAclsResource(utils.BarbicanAPIBaseTestCase,
TestACLsWithContextMixin):
def test_can_create_new_container_acls(self):
"""Create container acls and compare db values with request data."""
container_id, _ = create_container(self.app)
resp = create_acls(
self.app, 'containers', container_id,
read_user_ids=['u1', 'u2'])
self.assertEqual(200, resp.status_int)
self.assertIsNotNone(resp.json)
self.assertIn('/containers/{0}/acl'.format(container_id),
resp.json['acl_ref'])
acl_map = _get_acl_map(container_id, is_secret=False)
# Check project_access is True when not provided
self.assertTrue(acl_map['read']['project_access'])
self.assertEqual(set(['u1', 'u2']),
set(acl_map['read'].to_dict_fields()['users']))
def test_who_can_create_new_container_acls(self):
"""Test who can create new container ACLs as per policy rules.
New container ACLs can be created by user who created the container.
Other user with 'creator' role in container project cannot create ACL
if user is not creator of the container.
User with 'admin' role in container project can create ACL for that
container.
"""
creator_user_id = 'creatorUserId'
container_id = self._create_container_with_creator_user(
self.app, creator_user_id)
container_id2 = self._create_container_with_creator_user(
self.app, creator_user_id)
resp = self._set_acls_with_context(
self.app, entity_type='containers', op_type='create',
entity_id=container_id, roles=['creator'],
user='NotContainerCreator', expect_errors=True)
self.assertEqual(resp.status_int, 403)
resp = self._set_acls_with_context(
self.app, entity_type='containers', op_type='create',
entity_id=container_id, roles=['creator'],
user=creator_user_id, expect_errors=False)
self.assertEqual(resp.status_int, 200)
# test for user with 'admin' role in container project
resp = self._set_acls_with_context(
self.app, entity_type='containers', op_type='create',
entity_id=container_id2, roles=['admin'], user='AdminUser',
expect_errors=False)
self.assertEqual(resp.status_int, 200)
def test_create_new_container_acls_with_project_access_true(self):
"""Should allow creating acls for new container with project-access."""
container_id, _ = create_container(self.app)
resp = create_acls(
self.app, 'containers', container_id,
read_project_access=True,
read_user_ids=['u1', 'u3', 'u4'])
self.assertEqual(200, resp.status_int)
self.assertIsNotNone(resp.json)
self.assertIn('/containers/{0}/acl'.format(container_id),
resp.json['acl_ref'])
acl_map = _get_acl_map(container_id, is_secret=False)
self.assertTrue(acl_map['read']['project_access'])
def test_create_new_container_acls_with_project_access_false(self):
"""Should allow creating acls for new container with project-access."""
container_id, _ = create_container(self.app)
resp = create_acls(
self.app, 'containers', container_id,
read_project_access=False,
read_user_ids=['u1', 'u3', 'u4'])
self.assertEqual(200, resp.status_int)
self.assertIsNotNone(resp.json)
self.assertIn('/containers/{0}/acl'.format(container_id),
resp.json['acl_ref'])
acl_map = _get_acl_map(container_id, is_secret=False)
self.assertFalse(acl_map['read']['project_access'])
def test_container_acls_with_invalid_project_access_value_fail(self):
"""Should fail if project-access flag is provided as string value."""
container_id, _ = create_container(self.app)
resp = create_acls(
self.app, 'containers', container_id,
read_project_access="False",
read_user_ids=['u1', 'u3', 'u4'],
expect_errors=True)
self.assertEqual(400, resp.status_int)
resp = create_acls(
self.app, 'containers', container_id,
read_project_access="None",
expect_errors=True)
self.assertEqual(400, resp.status_int)
def test_get_container_acls_with_complete_acl_data(self):
"""Read existing acls for a with complete acl data."""
container_id, _ = create_container(self.app)
create_acls(
self.app, 'containers', container_id,
read_user_ids=['u1', 'u3'], read_project_access=False)
resp = self.app.get(
'/containers/{0}/acl'.format(container_id),
expect_errors=False)
self.assertEqual(200, resp.status_int)
self.assertIsNotNone(resp.json)
self.assertIn('read', resp.json)
self.assertFalse(resp.json['read']['project-access'])
self.assertIsNotNone(resp.json['read']['created'])
self.assertIsNotNone(resp.json['read']['updated'])
self.assertEqual(set(['u1', 'u3']), set(resp.json['read']['users']))
def test_get_container_acls_with_project_access_data(self):
"""Read existing acls for acl when only project-access flag is set."""
container_id, _ = create_container(self.app)
create_acls(
self.app, 'containers', container_id,
read_project_access=False)
resp = self.app.get(
'/containers/{0}/acl'.format(container_id),
expect_errors=False)
self.assertEqual(200, resp.status_int)
self.assertIsNotNone(resp.json)
self.assertEqual([], resp.json['read']['users'])
self.assertFalse(resp.json['read']['project-access'])
self.assertIsNotNone(resp.json['read']['created'])
self.assertIsNotNone(resp.json['read']['updated'])
def test_get_container_acls_invalid_container_id_should_fail(self):
"""Get container acls should fail for invalid secret id.
This test applies to all container ACLs methods as secret entity is
populated in same manner for get, put, patch, delete methods.
"""
container_id, _ = create_container(self.app)
create_acls(
self.app, 'containers', container_id,
read_project_access=True)
resp = self.app.get(
'/containers/{0}/acl'.format(uuid.uuid4().hex),
expect_errors=True)
self.assertEqual(404, resp.status_int)
def test_get_container_acls_invalid_non_uuid_secret_should_fail(self):
"""Get container acls should fail for invalid (non-uuid) id."""
container_id, _ = create_container(self.app)
create_acls(
self.app, 'containers', container_id,
read_project_access=True)
resp = self.app.get(
'/containers/{0}/acl'.format('my_container_id'),
expect_errors=True)
self.assertEqual(404, resp.status_int)
def test_get_container_acls_no_acls_defined_return_default_acl(self):
"""Get container acls should pass when no acls defined for a secret."""
container_id, _ = create_container(self.app)
resp = self.app.get(
'/containers/{0}/acl'.format(container_id),
expect_errors=True)
self.assertEqual(200, resp.status_int)
self.assertEqual(acls.DEFAULT_ACL, resp.json)
def test_full_update_container_acls_modify_all_acls(self):
"""Acls update where only user ids list is modified."""
container_id, _ = create_container(self.app)
create_acls(
self.app, 'containers', container_id, read_project_access=False,
read_user_ids=['u1', 'u2'])
resp = update_acls(
self.app, 'containers', container_id, partial_update=False,
read_user_ids=['u1', 'u2', 'u5'])
self.assertEqual(200, resp.status_int)
self.assertIsNotNone(resp.json)
self.assertIn('/containers/{0}/acl'.format(container_id),
resp.json['acl_ref'])
acl_map = _get_acl_map(container_id, is_secret=False)
# Check project_access is True when not provided
self.assertTrue(acl_map['read']['project_access'])
self.assertIn('u5', acl_map['read'].to_dict_fields()['users'])
def test_full_update_container_acls_modify_project_access_values(self):
"""Acls update where user ids and project-access flag is modified."""
container_id, _ = create_container(self.app)
create_acls(
self.app, 'containers', container_id,
read_user_ids=['u1', 'u2'])
resp = update_acls(
self.app, 'containers', container_id, partial_update=False,
read_project_access=False)
self.assertEqual(200, resp.status_int)
self.assertIsNotNone(resp.json)
self.assertIn('/containers/{0}/acl'.format(container_id),
resp.json['acl_ref'])
acl_map = _get_acl_map(container_id, is_secret=False)
self.assertFalse(acl_map['read']['project_access'])
self.assertIsNone(acl_map['read'].to_dict_fields().get('users'))
def test_full_update_container_acls_with_read_users_only(self):
"""Acls full update where specific operation acl is modified."""
container_id, _ = create_container(self.app)
create_acls(
self.app, 'containers', container_id,
read_user_ids=['u1', 'u2'])
acl_map = _get_acl_map(container_id, is_secret=False)
# ACL api does not support 'list' operation so making direct db update
# in acl operation data to make sure full update removes this existing
# ACL.
container_acl = acl_map['read']
container_acl.operation = 'list'
container_acl.save()
acl_map = _get_acl_map(container_id, is_secret=False)
# check 'list' operation is there in db
self.assertIn('list', acl_map)
resp = update_acls(
self.app, 'containers', container_id, partial_update=False,
read_user_ids=['u1', 'u3', 'u5'])
self.assertEqual(200, resp.status_int)
self.assertIsNotNone(resp.json)
self.assertIn('/containers/{0}/acl'.format(container_id),
resp.json['acl_ref'])
acl_map = _get_acl_map(container_id, is_secret=False)
# make sure 'list' operation is no longer after full update
self.assertNotIn('list', acl_map)
self.assertTrue(acl_map['read']['project_access'])
self.assertEqual(set(['u1', 'u3', 'u5']),
set(acl_map['read'].to_dict_fields()['users']))
self.assertNotIn('u2', acl_map['read'].to_dict_fields()['users'])
def test_partial_update_container_acls_with_read_users_only(self):
"""Acls update where specific operation acl is modified."""
container_id, _ = create_container(self.app)
create_acls(
self.app, 'containers', container_id,
read_user_ids=['u1', 'u2'])
acl_map = _get_acl_map(container_id, is_secret=False)
secret_acl = acl_map['read']
secret_acl.operation = 'list'
secret_acl.save()
acl_map = _get_acl_map(container_id, is_secret=False)
# check 'list' operation is there in db
self.assertIn('list', acl_map)
resp = update_acls(
self.app, 'containers', container_id, partial_update=True,
read_user_ids=['u1', 'u3', 'u5'])
self.assertEqual(200, resp.status_int)
self.assertIsNotNone(resp.json)
self.assertIn('/containers/{0}/acl'.format(container_id),
resp.json['acl_ref'])
acl_map = _get_acl_map(container_id, is_secret=False)
# For partial update, the existing ACL for the other operation is not touched.
self.assertIn('list', acl_map)
self.assertEqual(set(['u1', 'u2']),
set(acl_map['list'].to_dict_fields()['users']))
self.assertTrue(acl_map['read']['project_access'])
self.assertEqual(set(['u1', 'u3', 'u5']),
set(acl_map['read'].to_dict_fields()['users']))
def test_partial_update_container_acls_when_no_acls_defined(self):
"""Acls partial update pass when no acls are defined for container.
Partial update (PATCH) is applicable even when no explicit ACL has been
set as by default every container has implicit acl definition. If PUT
is used, then new ACL is created instead.
"""
container_id, _ = create_container(self.app)
resp = update_acls(
self.app, 'containers', container_id, partial_update=True,
read_user_ids=['u1', 'u3', 'u5'], expect_errors=False)
self.assertEqual(200, resp.status_int)
acl_map = _get_acl_map(container_id, is_secret=False)
self.assertTrue(acl_map['read']['project_access'])
def test_partial_update_container_acls_modify_project_access_values(self):
"""Acls partial update where project-access flag is modified."""
container_id, _ = create_container(self.app)
create_acls(
self.app, 'containers', container_id,
read_user_ids=['u1', 'u2'],
read_project_access=False)
resp = update_acls(
self.app, 'containers', container_id, partial_update=True,
read_project_access=True)
self.assertEqual(200, resp.status_int)
self.assertIsNotNone(resp.json)
self.assertIn('/containers/{0}/acl'.format(container_id),
resp.json['acl_ref'])
acl_map = _get_acl_map(container_id, is_secret=False)
self.assertTrue(acl_map['read']['project_access'])
self.assertEqual(set(['u1', 'u2']),
set(acl_map['read'].to_dict_fields()['users']))
def test_who_can_update_container_acls(self):
"""Test PATCH update existing container ACLs as per policy rules.
Existing container ACLs can be updated by user who created the
container.
Other user with 'creator' role in container project cannot update ACL
if user is not creator of the container.
User with 'admin' role in container project can update ACL for that
container.
"""
creator_user_id = 'creatorUserId'
container_id = self._create_container_with_creator_user(
self.app, creator_user_id)
self._set_acls_with_context(
self.app, entity_type='containers', op_type='create',
entity_id=container_id, enforce_policy=False)
resp = self._set_acls_with_context(
self.app, entity_type='containers', op_type='update',
entity_id=container_id, roles=['creator'], user='NotCreator',
expect_errors=True)
self.assertEqual(resp.status_int, 403)
resp = self._set_acls_with_context(
self.app, entity_type='containers', op_type='update',
entity_id=container_id, roles=['creator'],
user=creator_user_id)
self.assertEqual(resp.status_int, 200)
# test for user with 'admin' role in container project
resp = self._set_acls_with_context(
self.app, entity_type='containers', op_type='update',
entity_id=container_id, roles=['admin'], user='AdminUser')
self.assertEqual(resp.status_int, 200)
def test_delete_container_acls_with_valid_container_id(self):
"""Delete existing acls for a given container."""
container_id, _ = create_container(self.app)
create_acls(
self.app, 'containers', container_id,
read_project_access=True)
resp = self.app.delete(
'/containers/{0}/acl'.format(container_id),
expect_errors=False)
content = resp.json
self.assertIsNone(content) # make sure there is no response
self.assertEqual(200, resp.status_int)
acl_map = _get_acl_map(container_id, is_secret=False)
self.assertFalse(acl_map)
def test_delete_container_acls_no_acl_defined_should_pass(self):
"""Delete acls should pass when no acls are defined for a container."""
container_id, _ = create_container(self.app)
resp = self.app.delete(
'/containers/{0}/acl'.format(container_id),
expect_errors=False)
self.assertEqual(200, resp.status_int)
def test_who_can_delete_container_acls(self):
"""Test who can delete existing container ACLs as per policy rules.
Existing container ACLs can be deleted by user who created the
container.
Other user with 'creator' role in container project cannot delete ACL
if user is not creator of the container.
User with 'admin' role in container project can delete ACL for that
container.
"""
creator_user_id = 'creatorUserId'
container_id = self._create_container_with_creator_user(
self.app, creator_user_id)
self._set_acls_with_context(
self.app, entity_type='containers', op_type='create',
entity_id=container_id, enforce_policy=False)
resp = self._set_acls_with_context(
self.app, entity_type='containers', op_type='delete',
entity_id=container_id, roles=['creator'], user='NotCreator',
expect_errors=True)
self.assertEqual(resp.status_int, 403)
resp = self._set_acls_with_context(
self.app, entity_type='containers', op_type='delete',
entity_id=container_id, roles=['creator'],
user=creator_user_id)
self.assertEqual(resp.status_int, 200)
# Create new container ACLs again.
self._set_acls_with_context(
self.app, entity_type='containers', op_type='create',
entity_id=container_id, enforce_policy=False)
# test for user with 'admin' role in container project
resp = self._set_acls_with_context(
self.app, entity_type='containers', op_type='delete',
entity_id=container_id, roles=['admin'],
user='AdminUser')
self.assertEqual(resp.status_int, 200)
def test_invoke_container_acls_head_should_fail(self):
"""PUT request to container acls URI is not supported."""
container_id, _ = create_container(self.app)
resp = self.app.head(
'/containers/{0}/acl/'.format(container_id),
expect_errors=True)
self.assertEqual(405, resp.status_int)
# ----------------------- Helper Functions ---------------------------
def create_secret(app, name=None, algorithm=None, bit_length=None, mode=None,
expiration=None, payload=b'not-encrypted',
content_type='text/plain',
content_encoding=None, transport_key_id=None,
transport_key_needed=None, expect_errors=False):
request = {
'name': name,
'algorithm': algorithm,
'bit_length': bit_length,
'mode': mode,
'expiration': expiration,
'payload': payload,
'payload_content_type': content_type,
'payload_content_encoding': content_encoding,
'transport_key_id': transport_key_id,
'transport_key_needed': transport_key_needed
}
cleaned_request = {key: val for key, val in request.items()
if val is not None}
resp = app.post_json(
'/secrets/',
cleaned_request,
expect_errors=expect_errors
)
created_uuid = None
if resp.status_int == 201:
secret_ref = resp.json.get('secret_ref', '')
_, created_uuid = os.path.split(secret_ref)
return created_uuid, resp
def create_container(app):
_, resp = create_secret(app)
secret_ref = resp.json['secret_ref']
request = {
"name": "container name",
"type": "generic",
"secret_refs": [
{
"name": "any_key",
"secret_ref": secret_ref
}
]
}
resp = app.post_json(
'/containers/',
request,
expect_errors=False
)
created_uuid = None
if resp.status_int == 201:
container_ref = resp.json.get('container_ref', '')
_, created_uuid = os.path.split(container_ref)
return created_uuid, resp
def create_acls(app, entity_type, entity_id, read_user_ids=None,
read_project_access=None,
expect_errors=False):
return manage_acls(app, entity_type, entity_id,
read_user_ids=read_user_ids,
read_project_access=read_project_access,
is_update=False, partial_update=False,
expect_errors=expect_errors)
def update_acls(app, entity_type, entity_id, read_user_ids=None,
read_project_access=None, partial_update=False,
expect_errors=False):
return manage_acls(app, entity_type, entity_id,
read_user_ids=read_user_ids,
read_project_access=read_project_access,
is_update=True, partial_update=partial_update,
expect_errors=expect_errors)
def manage_acls(app, entity_type, entity_id, read_user_ids=None,
read_project_access=None, is_update=False,
partial_update=None, expect_errors=False):
request = {}
_append_acl_to_request(request, 'read', read_user_ids,
read_project_access)
cleaned_request = {key: val for key, val in request.items()
if val is not None}
if is_update and partial_update: # patch for partial update
resp = app.patch_json(
'/{0}/{1}/acl'.format(entity_type, entity_id),
cleaned_request,
expect_errors=expect_errors)
else: # put (for create or complete update)
resp = app.put_json(
'/{0}/{1}/acl'.format(entity_type, entity_id),
cleaned_request,
expect_errors=expect_errors)
return resp
def _append_acl_to_request(req, operation, user_ids=None, project_access=None):
op_dict = {}
if user_ids is not None:
op_dict['users'] = user_ids
if project_access is not None:
op_dict['project-access'] = project_access
if op_dict:
req[operation] = op_dict
def _get_acl_map(entity_id, is_secret=True):
"""Provides map of operation: acl_entity for given entity id."""
if is_secret:
acl_repo = repositories.get_secret_acl_repository()
acl_map = {acl.operation: acl for acl in
acl_repo.get_by_secret_id(entity_id)}
else:
acl_repo = repositories.get_container_acl_repository()
acl_map = {acl.operation: acl for acl in
acl_repo.get_by_container_id(entity_id)}
return acl_map
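def _example_acl_helper_flow(app):
    """Illustrative sketch, not part of the original test module: shows how
    the helpers above are typically chained. `app` is assumed to be the
    webtest TestApp fixture they expect, and the expected status codes
    mirror the assertions used in the test classes above."""
    secret_id, resp = create_secret(app, name='my-secret')
    assert resp.status_int == 201
    # PUT creates (or completely replaces) the ACL settings.
    resp = create_acls(app, 'secrets', secret_id,
                       read_user_ids=['user1', 'user2'])
    assert resp.status_int == 200
    # PATCH applies a partial update, here flipping project access only.
    resp = update_acls(app, 'secrets', secret_id,
                       read_project_access=False, partial_update=True)
    assert resp.status_int == 200
    return secret_id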
| apache-2.0 |
RapidRatings/mongo-connector | mongo_connector/doc_managers/solr_doc_manager.py | 1 | 8749 | # Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Receives documents from the oplog worker threads and indexes them
into the backend.
This file is a document manager for the Solr search engine, but the intent
is that this file can be used as an example to add on different backends.
To extend this to other systems, simply implement the exact same class and
replace the method definitions with API calls for the desired backend.
"""
import re
import json
import bson.json_util as bsjson
from pysolr import Solr, SolrError
from mongo_connector import errors
from mongo_connector.constants import DEFAULT_COMMIT_INTERVAL
from mongo_connector.util import retry_until_ok
ADMIN_URL = 'admin/luke?show=schema&wt=json'
decoder = json.JSONDecoder()
class DocManager():
"""The DocManager class creates a connection to the backend engine and
adds/removes documents, and in the case of rollback, searches for them.
The reason for storing id/doc pairs as opposed to docs is so that multiple
updates to the same doc reflect the most up to date version as opposed to
multiple, slightly different versions of a doc.
"""
def __init__(self, url, auto_commit_interval=DEFAULT_COMMIT_INTERVAL,
unique_key='_id', **kwargs):
"""Verify Solr URL and establish a connection.
"""
self.solr = Solr(url)
self.unique_key = unique_key
# pysolr does things in milliseconds
if auto_commit_interval is not None:
self.auto_commit_interval = auto_commit_interval * 1000
else:
self.auto_commit_interval = None
self.field_list = []
self._build_fields()
def _parse_fields(self, result, field_name):
""" If Schema access, parse fields and build respective lists
"""
field_list = []
for key, value in result.get('schema', {}).get(field_name, {}).items():
if key not in field_list:
field_list.append(key)
return field_list
def _build_fields(self):
""" Builds a list of valid fields
"""
declared_fields = self.solr._send_request('get', ADMIN_URL)
result = decoder.decode(declared_fields)
self.field_list = self._parse_fields(result, 'fields')
# Build regular expressions to match dynamic fields.
# dynamic field names may have exactly one wildcard, either at
# the beginning or the end of the name
self._dynamic_field_regexes = []
for wc_pattern in self._parse_fields(result, 'dynamicFields'):
if wc_pattern[0] == "*":
self._dynamic_field_regexes.append(
re.compile("\w%s\Z" % wc_pattern))
elif wc_pattern[-1] == "*":
self._dynamic_field_regexes.append(
re.compile("\A%s\w*" % wc_pattern[:-1]))
def _clean_doc(self, doc):
"""Reformats the given document before insertion into Solr.
This method reformats the document in the following ways:
- removes extraneous fields that aren't defined in schema.xml
- unwinds arrays in order to find and later flatten sub-documents
- flattens the document so that there are no sub-documents, and every
value is associated with its dot-separated path of keys
An example:
{"a": 2,
"b": {
"c": {
"d": 5
}
},
"e": [6, 7, 8]
}
becomes:
{"a": 2, "b.c.d": 5, "e.0": 6, "e.1": 7, "e.2": 8}
"""
# SOLR cannot index fields within sub-documents, so flatten documents
# with the dot-separated path to each value as the respective key
def flattened(doc):
def flattened_kernel(doc, path):
for k, v in doc.items():
path.append(k)
if isinstance(v, dict):
for inner_k, inner_v in flattened_kernel(v, path):
yield inner_k, inner_v
elif isinstance(v, list):
for li, lv in enumerate(v):
path.append(str(li))
if isinstance(lv, dict):
for dk, dv in flattened_kernel(lv, path):
yield dk, dv
else:
yield ".".join(path), lv
path.pop()
else:
yield ".".join(path), v
path.pop()
return dict(flattened_kernel(doc, []))
# Translate the _id field to whatever unique key we're using
doc[self.unique_key] = doc["_id"]
flat_doc = flattened(doc)
# Only include fields that are explicitly provided in the
# schema or match one of the dynamic field patterns, if
# we were able to retrieve the schema
if len(self.field_list) + len(self._dynamic_field_regexes) > 0:
def include_field(field):
return field in self.field_list or any(
regex.match(field) for regex in self._dynamic_field_regexes
)
return dict((k, v) for k, v in flat_doc.items() if include_field(k))
return flat_doc
def stop(self):
""" Stops the instance
"""
pass
def upsert(self, doc):
"""Update or insert a document into Solr
This method should call whatever add/insert/update method exists for
the backend engine and add the document in there. The input will
always be one mongo document, represented as a Python dictionary.
"""
try:
if self.auto_commit_interval is not None:
self.solr.add([self._clean_doc(doc)],
commit=(self.auto_commit_interval == 0),
commitWithin=str(self.auto_commit_interval))
else:
self.solr.add([self._clean_doc(doc)], commit=False)
except SolrError:
raise errors.OperationFailed(
"Could not insert %r into Solr" % bsjson.dumps(doc))
def bulk_upsert(self, docs):
"""Update or insert multiple documents into Solr
docs may be any iterable
"""
try:
cleaned = (self._clean_doc(d) for d in docs)
if self.auto_commit_interval is not None:
self.solr.add(cleaned, commit=(self.auto_commit_interval == 0),
commitWithin=str(self.auto_commit_interval))
else:
self.solr.add(cleaned, commit=False)
except SolrError:
raise errors.OperationFailed(
"Could not bulk-insert documents into Solr")
def remove(self, doc):
"""Removes documents from Solr
The input is a python dictionary that represents a mongo document.
"""
self.solr.delete(id=str(doc[self.unique_key]),
commit=(self.auto_commit_interval == 0))
def _remove(self):
"""Removes everything
"""
self.solr.delete(q='*:*', commit=(self.auto_commit_interval == 0))
def search(self, start_ts, end_ts):
"""Called to query Solr for documents in a time range.
"""
query = '_ts: [%s TO %s]' % (start_ts, end_ts)
return self.solr.search(query, rows=100000000)
def _search(self, query):
"""For test purposes only. Performs search on Solr with given query
Does not have to be implemented.
"""
return self.solr.search(query, rows=200)
def commit(self):
"""This function is used to force a commit.
"""
retry_until_ok(self.solr.commit)
def get_last_doc(self):
"""Returns the last document stored in the Solr engine.
"""
#search everything, sort by descending timestamp, return 1 row
try:
result = self.solr.search('*:*', sort='_ts desc', rows=1)
except ValueError:
return None
if len(result) == 0:
return None
return result.docs[0]
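# Minimal usage sketch (illustrative only, not part of the original module).
# The Solr URL, unique key and document contents are assumptions; a running
# Solr core whose schema accepts these fields is required.
if __name__ == "__main__":
    dm = DocManager("http://localhost:8983/solr", unique_key="id")
    dm.upsert({"_id": "1", "title": "hello", "meta": {"lang": "en"}})
    dm.commit()
    dm.remove({"id": "1"})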
| apache-2.0 |
ekaputra07/gempa-monitor | lib/bs4/tests/test_html5lib.py | 20 | 3910 | """Tests to ensure that the html5lib tree builder generates good trees."""
import warnings
try:
from bs4.builder import HTML5TreeBuilder
HTML5LIB_PRESENT = True
except ImportError, e:
HTML5LIB_PRESENT = False
from bs4.element import SoupStrainer
from bs4.testing import (
HTML5TreeBuilderSmokeTest,
SoupTest,
skipIf,
)
@skipIf(
not HTML5LIB_PRESENT,
"html5lib seems not to be present, not testing its tree builder.")
class HTML5LibBuilderSmokeTest(SoupTest, HTML5TreeBuilderSmokeTest):
"""See ``HTML5TreeBuilderSmokeTest``."""
@property
def default_builder(self):
return HTML5TreeBuilder()
def test_soupstrainer(self):
# The html5lib tree builder does not support SoupStrainers.
strainer = SoupStrainer("b")
markup = "<p>A <b>bold</b> statement.</p>"
with warnings.catch_warnings(record=True) as w:
soup = self.soup(markup, parse_only=strainer)
self.assertEqual(
soup.decode(), self.document_for(markup))
self.assertTrue(
"the html5lib tree builder doesn't support parse_only" in
str(w[0].message))
def test_correctly_nested_tables(self):
"""html5lib inserts <tbody> tags where other parsers don't."""
markup = ('<table id="1">'
'<tr>'
"<td>Here's another table:"
'<table id="2">'
'<tr><td>foo</td></tr>'
'</table></td>')
self.assertSoupEquals(
markup,
'<table id="1"><tbody><tr><td>Here\'s another table:'
'<table id="2"><tbody><tr><td>foo</td></tr></tbody></table>'
'</td></tr></tbody></table>')
self.assertSoupEquals(
"<table><thead><tr><td>Foo</td></tr></thead>"
"<tbody><tr><td>Bar</td></tr></tbody>"
"<tfoot><tr><td>Baz</td></tr></tfoot></table>")
def test_xml_declaration_followed_by_doctype(self):
markup = '''<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html>
<html>
<head>
</head>
<body>
<p>foo</p>
</body>
</html>'''
soup = self.soup(markup)
# Verify that we can reach the <p> tag; this means the tree is connected.
self.assertEqual(b"<p>foo</p>", soup.p.encode())
def test_reparented_markup(self):
markup = '<p><em>foo</p>\n<p>bar<a></a></em></p>'
soup = self.soup(markup)
self.assertEqual(u"<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p></body>", soup.body.decode())
self.assertEqual(2, len(soup.find_all('p')))
def test_reparented_markup_ends_with_whitespace(self):
markup = '<p><em>foo</p>\n<p>bar<a></a></em></p>\n'
soup = self.soup(markup)
self.assertEqual(u"<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p>\n</body>", soup.body.decode())
self.assertEqual(2, len(soup.find_all('p')))
def test_reparented_markup_containing_identical_whitespace_nodes(self):
"""Verify that we keep the two whitespace nodes in this
document distinct when reparenting the adjacent <tbody> tags.
"""
markup = '<table> <tbody><tbody><ims></tbody> </table>'
soup = self.soup(markup)
space1, space2 = soup.find_all(string=' ')
tbody1, tbody2 = soup.find_all('tbody')
assert space1.next_element is tbody1
assert tbody2.next_element is space2
def test_processing_instruction(self):
"""Processing instructions become comments."""
markup = b"""<?PITarget PIContent?>"""
soup = self.soup(markup)
assert str(soup).startswith("<!--?PITarget PIContent?-->")
def test_cloned_multivalue_node(self):
markup = b"""<a class="my_class"><p></a>"""
soup = self.soup(markup)
a1, a2 = soup.find_all('a')
self.assertEqual(a1, a2)
assert a1 is not a2
| mit |
gic888/MIEN | optimizers/arraystore.py | 1 | 5947 | #!/usr/bin/env python
## Copyright (C) 2005-2006 Graham I Cummins
## This program is free software; you can redistribute it and/or modify it under
## the terms of the GNU General Public License as published by the Free Software
## Foundation; either version 2 of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful, but WITHOUT ANY
## WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
## PARTICULAR PURPOSE. See the GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License along with
## this program; if not, write to the Free Software Foundation, Inc., 59 Temple
## Place, Suite 330, Boston, MA 02111-1307 USA
##
'''
Specialized alternative to shelve. provides a class that stores a list of
arrays.All arrays are of type Float32, 1D, and of the same length. File format
is simple binary, always littlendian, with the first 4 bytes encoding the width
as an unsigned int ("<I")
'''
import struct, os, tempfile
from sys import byteorder
from mien.math.array import array, float32, fromstring, reshape, zeros
def verify(fname, width, minlength=0):
'''returns true if fname exists and appears to be an arraystore with the indicated
width. If minlength is a positive integer, also require that the length of the arraystore
is at least this large'''
try:
f=open(fname, 'rb')
w=struct.unpack('<I', f.read(4))[0]
f.seek(0, 2)
fl=f.tell()
f.close()
except:
return False
if w!=width:
return False
if minlength:
l=(fl/4)-1
n=l/w
if n<minlength:
return False
return True
def empty(fname, width, safe=False):
'''Creates an empty arraystore in fname with the indicated width. If safe is False, this will overwrite an existing file. If safe is True, the file is created by a call to mkstemp with the indicated path as dir and prefix. This prevents overwriting any files, but changes the file name. In both cases the function returns the file name (although if safe is False this will always be the same as the fname argument).'''
s=struct.pack('<I', width)
if not safe:
if os.path.isfile(fname):
os.unlink(fname)
f=open(fname, 'wb')
f.write(s)
f.close()
else:
dir, fname= os.path.split(fname)
fh, fname=tempfile.mkstemp('.astore', fname, dir)
os.write(fh, s)
os.close(fh)
return fname
class ArrayStore:
dtype=float32
def __init__(self, fname, mode='r'):
'''fname is the name of a file (string), and mode is one of "w" or "r" for
read/write or read only access. Init requires that a file already exist
(use verify and empty to make new files).'''
if mode!='r':
mode='r+b'
else:
mode='rb'
self.file=file(fname, mode)
self.file.seek(0)
self.width=struct.unpack('<I', self.file.read(4))[0]
self.invert=byteorder=='big'
def __del__(self):
self.file.close()
def close(self):
self.file.close()
def __len__(self):
self.file.seek(0, 2)
fl=self.file.tell()
l=(fl/4)-1
n=l/self.width
return int(n)
def __getitem__(self, key):
l=len(self)
if type(key)==slice:
st=key.start
if st is None:
st=0
if st<0:
st=l+st
if st<0:
st=0
elif st>l-1:
return zeros((0, self.width), self.dtype)
sp=key.stop
if sp is None:
sp=l
if sp<0:
sp=l+sp
if sp<1:
return zeros((0, self.width), self.dtype)
ne=min(sp, l)
ne=ne-st
self.file.seek(4+4*self.width*st)
a=fromstring(self.file.read(self.width*4*ne), self.dtype)
if self.invert:
a.byteswap()
a=reshape(a, (ne, self.width))
else:
if key<0:
key=l+key
self.file.seek(4+4*self.width*key)
s=self.file.read(self.width*4)
a=fromstring(s, self.dtype)
if self.invert:
a.byteswap()
return a
def append(self, a):
if len(a.shape)!=1:
raise IOError("Can only add 1D arrays to arraystore")
if a.shape[0]!=self.width:
raise IOError("Attempt to add array of wrong size ")
if a.dtype!=self.dtype:
a=a.astype(self.dtype)
if self.invert:
a=a.copy()
a.byteswap()
s=a.tostring()
self.file.seek(0,2)
self.file.write(s)
self.file.flush()
return len(self)
def toarray(self):
self.file.seek(4)
a=fromstring(self.file.read(), self.dtype)
if self.invert:
a.byteswap()
return reshape(a, (-1, self.width))
def setarray(self, a):
'''Sets the stored values to the array a (must be of the correct width)'''
if a.shape[1]!=self.width:
raise IOError("Attempt to add array of wrong size ")
if a.dtype!=self.dtype:
a=a.astype(self.dtype)
if self.invert:
a=a.copy()
a.byteswap()
self.file.truncate(4)
self.file.seek(4)
self.file.write(a.tostring())
self.file.flush()
def tail(self, n):
'''Return the last n rows of self. If n>len(self) return the whole array (shape[0] will be less than n). This should be exactly equivalent to self[-n:] except that Python 2.5 applies an annoying modulo operation during automatic slice construction. This function is in fact exactly the same as self[slice(-n, 2147483647, None)]'''
return self[slice(-n, 2147483647, None)]
def getColumn(self, i):
if i>=self.width:
raise IndexError("Column index exceeds width")
self.file.seek(0, 2)
fl=self.file.tell()
c=[]
adv=self.width*4
pos=4+i*4
while pos<=fl-4:  # last element of the column starts at fl-4
self.file.seek(pos)
c.append(self.file.read(4))
pos+=adv
a=fromstring(''.join(c), self.dtype)
if self.invert:
a.byteswap()
return a
def take(self, ai):
'''Return an array containing all the rows specified in the index array ai. Ai may specify multiple occurrences of the same index. This function is probably slower than numpy.take(self.toarray(), ai), but will use much less memory if ai is short and len(self) is large.'''
ret=zeros((ai.shape[0],self.width), self.dtype)
for j, i in enumerate(ai):
self.file.seek(4+4*self.width*i)
s=self.file.read(self.width*4)
ret[j,:]=fromstring(s, self.dtype)
if self.invert:
ret.byteswap()
return ret
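if __name__ == '__main__':
    # Illustrative round trip (not part of the original module); the file
    # name is an assumption for demonstration only.
    fname = empty('demo.astore', 3)
    store = ArrayStore(fname, 'w')
    store.append(array([1.0, 2.0, 3.0], float32))
    store.append(array([4.0, 5.0, 6.0], float32))
    print(len(store))          # 2 rows of width 3
    print(store[1])            # [ 4.  5.  6.]
    print(store.getColumn(0))  # first value of every row: [ 1.  4.]
    store.close()
    os.unlink(fname)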
| gpl-2.0 |
tmeits/pybrain | pybrain/tools/neuralnets.py | 26 | 13763 | # Neural network data analysis tool collection. Makes heavy use of the logging module.
# Can generate training curves during the run (from properly setup IPython and/or with
# TkAgg backend and interactive mode - see matplotlib documentation).
__author__ = "Martin Felder"
__version__ = "$Id$"
from pylab import ion, figure, draw
import csv
from numpy import Infinity
import logging
from pybrain.datasets import ClassificationDataSet, SequentialDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised import BackpropTrainer, RPropMinusTrainer, Trainer
from pybrain.structure import SoftmaxLayer, LSTMLayer
from pybrain.utilities import setAllArgs
from pybrain.tools.plotting import MultilinePlotter
from pybrain.tools.validation import testOnSequenceData, ModuleValidator, Validator
from pybrain.tools.customxml import NetworkWriter
class NNtools(object):
""" Abstract class providing basic functionality to make neural network training more comfortable """
def __init__(self, DS, **kwargs):
""" Initialize with the training data set DS. All keywords given are set as member variables.
The following are particularly important:
:key hidden: number of hidden units
:key TDS: test data set for checking convergence
:key VDS: validation data set for final performance evaluation
:key epoinc: number of epochs to train for, before checking convergence (default: 5)
"""
self.DS = DS
self.hidden = 10
self.maxepochs = 1000
self.Graph = None
self.TDS = None
self.VDS = None
self.epoinc = 5
setAllArgs(self, kwargs)
self.trainCurve = None
def initGraphics(self, ymax=10, xmax= -1):
""" initialize the interactive graphics output window, and return a handle to the plot """
if xmax < 0:
xmax = self.maxepochs
figure(figsize=[12, 8])
ion()
draw()
#self.Graph = MultilinePlotter(autoscale=1.2 ) #xlim=[0, self.maxepochs], ylim=[0, ymax])
self.Graph = MultilinePlotter(xlim=[0, xmax], ylim=[0, ymax])
self.Graph.setLineStyle([0, 1], linewidth=2)
return self.Graph
def set(self, **kwargs):
""" convenience method to set several member variables at once """
setAllArgs(self, kwargs)
def saveTrainingCurve(self, learnfname):
""" save the training curves into a file with the given name (CSV format) """
logging.info('Saving training curves into ' + learnfname)
if self.trainCurve is None:
logging.error('No training curve available for saving!')
return
learnf = open(learnfname, "wb")
writer = csv.writer(learnf, dialect='excel')
nDataSets = len(self.trainCurve)
for i in range(1, len(self.trainCurve[0]) - 1):
writer.writerow([self.trainCurve[k][i] for k in range(nDataSets)])
learnf.close()
def saveNetwork(self, fname):
""" save the trained network to a file """
NetworkWriter.writeToFile(self.Trainer.module, fname)
logging.info("Network saved to: " + fname)
#=======================================================================================================
class NNregression(NNtools):
""" Learns to numerically predict the targets of a set of data, with optional online progress plots. """
def setupNN(self, trainer=RPropMinusTrainer, hidden=None, **trnargs):
""" Constructs a 3-layer FNN for regression. Optional arguments are passed on to the Trainer class. """
if hidden is not None:
self.hidden = hidden
logging.info("Constructing FNN with following config:")
FNN = buildNetwork(self.DS.indim, self.hidden, self.DS.outdim)
logging.info(str(FNN) + "\n Hidden units:\n " + str(self.hidden))
logging.info("Training FNN with following special arguments:")
logging.info(str(trnargs))
self.Trainer = trainer(FNN, dataset=self.DS, **trnargs)
def runTraining(self, convergence=0, **kwargs):
""" Trains the network on the stored dataset. If convergence is >0, check after that many epoch increments
whether test error is still improving, and stop training once it no longer does.
CAVEAT: No support for Sequential datasets!"""
assert isinstance(self.Trainer, Trainer)
if self.Graph is not None:
self.Graph.setLabels(x='epoch', y='normalized regression error')
self.Graph.setLegend(['training', 'test'], loc='upper right')
epoch = 0
inc = self.epoinc
best_error = Infinity
best_epoch = 0
learncurve_x = [0]
learncurve_y = [0.0]
valcurve_y = [0.0]
converged = False
convtest = 0
if convergence > 0:
logging.info("Convergence criterion: %d batches of %d epochs w/o improvement" % (convergence, inc))
while epoch <= self.maxepochs and not converged:
self.Trainer.trainEpochs(inc)
epoch += inc
learncurve_x.append(epoch)
# calculate errors on TRAINING data
err_trn = ModuleValidator.validate(Validator.MSE, self.Trainer.module, self.DS)
learncurve_y.append(err_trn)
if self.TDS is None:
logging.info("epoch: %6d, err_trn: %10g" % (epoch, err_trn))
else:
# calculate same errors on TEST data
err_tst = ModuleValidator.validate(Validator.MSE, self.Trainer.module, self.TDS)
valcurve_y.append(err_tst)
if err_tst < best_error:
# store best error and parameters
best_epoch = epoch
best_error = err_tst
bestweights = self.Trainer.module.params.copy()
convtest = 0
else:
convtest += 1
logging.info("epoch: %6d, err_trn: %10g, err_tst: %10g, best_tst: %10g" % (epoch, err_trn, err_tst, best_error))
if self.Graph is not None:
self.Graph.addData(1, epoch, err_tst)
# check if convergence criterion is fulfilled (no improvement after N epoincs)
if convtest >= convergence:
converged = True
if self.Graph is not None:
self.Graph.addData(0, epoch, err_trn)
self.Graph.update()
# training finished!
logging.info("Best epoch: %6d, with error: %10g" % (best_epoch, best_error))
if self.VDS is not None:
# calculate same errors on VALIDATION data
self.Trainer.module.params[:] = bestweights.copy()
err_val = ModuleValidator.validate(Validator.MSE, self.Trainer.module, self.VDS)
logging.info("Result on evaluation data: %10g" % err_val)
# store training curve for saving into file
self.trainCurve = (learncurve_x, learncurve_y, valcurve_y)
#=======================================================================================================
class NNclassifier(NNtools):
""" Learns to classify a set of data, with optional online progress plots. """
def __init__(self, DS, **kwargs):
""" Initialize the classifier: the least we need is the dataset to be classified. All keywords given are set as member variables. """
if not isinstance(DS, ClassificationDataSet):
raise TypeError('Need a ClassificationDataSet to do classification!')
NNtools.__init__(self, DS, **kwargs)
self.nClasses = self.DS.nClasses # need this because targets may be altered later
self.clsnames = None
self.targetsAreOneOfMany = False
def _convertAllDataToOneOfMany(self, values=[0, 1]):
""" converts all datasets associated with self into 1-out-of-many representations,
e.g. with original classes 0 to 4, the new target for class 1 would be [0,1,0,0,0],
or accordingly with other upper and lower bounds, as given by the values keyword """
if self.targetsAreOneOfMany:
return
else:
# convert all datasets to one-of-many ("winner takes all") representation
for dsname in ["DS", "TDS", "VDS"]:
d = getattr(self, dsname)
if d is not None:
if d.outdim < d.nClasses:
d._convertToOneOfMany(values)
self.targetsAreOneOfMany = True
def setupNN(self, trainer=RPropMinusTrainer, hidden=None, **trnargs):
""" Setup FNN and trainer for classification. """
self._convertAllDataToOneOfMany()
if hidden is not None:
self.hidden = hidden
FNN = buildNetwork(self.DS.indim, self.hidden, self.DS.outdim, outclass=SoftmaxLayer)
logging.info("Constructing classification FNN with following config:")
logging.info(str(FNN) + "\n Hidden units:\n " + str(self.hidden))
logging.info("Trainer received the following special arguments:")
logging.info(str(trnargs))
self.Trainer = trainer(FNN, dataset=self.DS, **trnargs)
def setupRNN(self, trainer=BackpropTrainer, hidden=None, **trnargs):
""" Setup an LSTM RNN and trainer for sequence classification. """
if hidden is not None:
self.hidden = hidden
self._convertAllDataToOneOfMany()
RNN = buildNetwork(self.DS.indim, self.hidden, self.DS.outdim, hiddenclass=LSTMLayer,
recurrent=True, outclass=SoftmaxLayer)
logging.info("Constructing classification RNN with following config:")
logging.info(str(RNN) + "\n Hidden units:\n " + str(self.hidden))
logging.info("Trainer received the following special arguments:")
logging.info(str(trnargs))
self.Trainer = trainer(RNN, dataset=self.DS, **trnargs)
def runTraining(self, convergence=0, **kwargs):
""" Trains the network on the stored dataset. If convergence is >0, check after that many epoch increments
whether test error is still improving, and stop training once it no longer does. """
assert isinstance(self.Trainer, Trainer)
if self.Graph is not None:
self.Graph.setLabels(x='epoch', y='% classification error')
self.Graph.setLegend(['training', 'test'], loc='lower right')
epoch = 0
inc = self.epoinc
best_error = 100.0
best_epoch = 0
learncurve_x = [0]
learncurve_y = [0.0]
valcurve_y = [0.0]
converged = False
convtest = 0
if convergence > 0:
logging.info("Convergence criterion: %d batches of %d epochs w/o improvement" % (convergence, inc))
while epoch <= self.maxepochs and not converged:
self.Trainer.trainEpochs(inc)
epoch += inc
learncurve_x.append(epoch)
# calculate errors on TRAINING data
if isinstance(self.DS, SequentialDataSet):
r_trn = 100. * (1.0 - testOnSequenceData(self.Trainer.module, self.DS))
else:
# FIXME: messy - validation does not belong into the Trainer...
out, trueclass = self.Trainer.testOnClassData(return_targets=True)
r_trn = 100. * (1.0 - Validator.classificationPerformance(out, trueclass))
learncurve_y.append(r_trn)
if self.TDS is None:
logging.info("epoch: %6d, err_trn: %5.2f%%" % (epoch, r_trn))
else:
# calculate errors on TEST data
if isinstance(self.DS, SequentialDataSet):
r_tst = 100. * (1.0 - testOnSequenceData(self.Trainer.module, self.TDS))
else:
# FIXME: messy - validation does not belong into the Trainer...
out, trueclass = self.Trainer.testOnClassData(return_targets=True, dataset=self.TDS)
r_tst = 100. * (1.0 - Validator.classificationPerformance(out, trueclass))
valcurve_y.append(r_tst)
if r_tst < best_error:
best_epoch = epoch
best_error = r_tst
bestweights = self.Trainer.module.params.copy()
convtest = 0
else:
convtest += 1
logging.info("epoch: %6d, err_trn: %5.2f%%, err_tst: %5.2f%%, best_tst: %5.2f%%" % (epoch, r_trn, r_tst, best_error))
if self.Graph is not None:
self.Graph.addData(1, epoch, r_tst)
# check if convergence criterion is fulfilled (no improvement after N epoincs)
if convtest >= convergence:
converged = True
if self.Graph is not None:
self.Graph.addData(0, epoch, r_trn)
self.Graph.update()
logging.info("Best epoch: %6d, with error: %5.2f%%" % (best_epoch, best_error))
if self.VDS is not None:
# calculate errors on VALIDATION data
self.Trainer.module.params[:] = bestweights.copy()
if isinstance(self.DS, SequentialDataSet):
r_val = 100. * (1.0 - testOnSequenceData(self.Trainer.module, self.VDS))
else:
out, trueclass = self.Trainer.testOnClassData(return_targets=True, dataset=self.VDS)
r_val = 100. * (1.0 - Validator.classificationPerformance(out, trueclass))
logging.info("Result on evaluation data: %5.2f%%" % r_val)
self.trainCurve = (learncurve_x, learncurve_y, valcurve_y)
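if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the original
    # module); dataset size and hyperparameters are arbitrary assumptions.
    from random import random
    from pybrain.datasets import ClassificationDataSet
    DS = ClassificationDataSet(2, nb_classes=2, class_labels=['below', 'above'])
    for _ in range(200):
        x, y = random(), random()
        DS.addSample([x, y], [int(x + y > 1.0)])
    nn = NNclassifier(DS, maxepochs=50, epoinc=5)
    nn.setupNN(hidden=5)
    nn.runTraining()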
| bsd-3-clause |
TheWardoctor/Wardoctors-repo | script.module.liveresolver/lib/liveresolver/modules/f4mproxy/utils/codec.py | 88 | 2613 | # Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""Classes for reading/writing binary data (such as TLS records)."""
from .compat import *
class Writer(object):
def __init__(self):
self.bytes = bytearray(0)
def add(self, x, length):
self.bytes += bytearray(length)
newIndex = len(self.bytes) - 1
for count in range(length):
self.bytes[newIndex] = x & 0xFF
x >>= 8
newIndex -= 1
def addFixSeq(self, seq, length):
for e in seq:
self.add(e, length)
def addVarSeq(self, seq, length, lengthLength):
self.add(len(seq)*length, lengthLength)
for e in seq:
self.add(e, length)
class Parser(object):
def __init__(self, bytes):
self.bytes = bytes
self.index = 0
def get(self, length):
if self.index + length > len(self.bytes):
raise SyntaxError()
x = 0
for count in range(length):
x <<= 8
x |= self.bytes[self.index]
self.index += 1
return x
def getFixBytes(self, lengthBytes):
if self.index + lengthBytes > len(self.bytes):
raise SyntaxError()
bytes = self.bytes[self.index : self.index+lengthBytes]
self.index += lengthBytes
return bytes
def getVarBytes(self, lengthLength):
lengthBytes = self.get(lengthLength)
return self.getFixBytes(lengthBytes)
def getFixList(self, length, lengthList):
l = [0] * lengthList
for x in range(lengthList):
l[x] = self.get(length)
return l
def getVarList(self, length, lengthLength):
lengthList = self.get(lengthLength)
if lengthList % length != 0:
raise SyntaxError()
lengthList = lengthList // length
l = [0] * lengthList
for x in range(lengthList):
l[x] = self.get(length)
return l
def startLengthCheck(self, lengthLength):
self.lengthCheck = self.get(lengthLength)
self.indexCheck = self.index
def setLengthCheck(self, length):
self.lengthCheck = length
self.indexCheck = self.index
def stopLengthCheck(self):
if (self.index - self.indexCheck) != self.lengthCheck:
raise SyntaxError()
def atLengthCheck(self):
if (self.index - self.indexCheck) < self.lengthCheck:
return False
elif (self.index - self.indexCheck) == self.lengthCheck:
return True
else:
raise SyntaxError()
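# Round-trip sketch (illustrative, not part of the original module). Because
# of the relative import above this file only runs as a package module, so
# the example is given as a comment:
#
#     w = Writer()
#     w.addVarSeq([1, 2, 3], 2, 2)   # 2-byte length prefix, three 2-byte items
#     p = Parser(w.bytes)
#     assert p.getVarList(2, 2) == [1, 2, 3]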
| apache-2.0 |
xzturn/tensorflow | tensorflow/python/ops/control_flow_grad.py | 49 | 9381 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in control_flow_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import,undefined-variable
from tensorflow.python.ops.control_flow_ops import *
# pylint: enable=wildcard-import
def _SwitchGrad(op, *grad):
"""Gradients for a Switch op is calculated using a Merge op.
If the switch is a loop switch, it will be visited twice. We create
the merge on the first visit, and update the other input of the merge
on the second visit. A next_iteration is also added on second visit.
"""
graph = ops.get_default_graph()
# pylint: disable=protected-access
op_ctxt = op._get_control_flow_context()
grad_ctxt = graph._get_control_flow_context()
# pylint: enable=protected-access
if isinstance(op_ctxt, WhileContext):
merge_grad = grad_ctxt.grad_state.switch_map.get(op)
if merge_grad is not None:
# This is the second time this Switch is visited. It comes from
# the non-exit branch of the Switch, so update the second input
# to the Merge.
# TODO(yuanbyu): Perform shape inference with this new input.
if grad[1] is not None:
# pylint: disable=protected-access
control_flow_ops._AddNextAndBackEdge(merge_grad, grad[1],
enforce_shape_invariant=False)
# pylint: enable=protected-access
return None, None
elif grad[0] is not None:
# This is the first time this Switch is visited. It comes from
# the Exit branch, which is grad[0]. grad[1] is empty at this point.
# Use grad[0] for both inputs to merge for now, but update the second
# input of merge when we see this Switch the second time.
merge_grad = merge([grad[0], grad[0]], name="b_switch")[0]
grad_ctxt.grad_state.switch_map[op] = merge_grad
return merge_grad, None
else:
# This is the first time this Switch is visited. It comes from the
# Identity branch. Such a Switch has `None` gradient for the Exit branch,
# meaning the output is not differentiable.
return None, None
elif isinstance(op_ctxt, CondContext):
zero_grad = grad[1 - op_ctxt.branch]
# At this point, we have created zero_grad guarded by the right switch.
# Unfortunately, we may still get None here for not trainable data types.
if zero_grad is None:
# For resource variables we get None always on the other branch, so bypass
# this.
if op.inputs[0].dtype == dtypes.resource:
return merge(
[grad[op_ctxt.branch]] * 2, name="cond_resource_grad")[0], None
return None, None
return merge(grad, name="cond_grad")[0], None
else:
false_grad = switch(grad[0], op.inputs[1])[0]
true_grad = switch(grad[1], op.inputs[1])[1]
return merge([false_grad, true_grad])[0], None
ops.RegisterGradient("Switch")(_SwitchGrad)
ops.RegisterGradient("RefSwitch")(_SwitchGrad)
@ops.RegisterGradient("Merge")
def _MergeGrad(op, grad, _):
"""Gradients for a Merge op are calculated using a Switch op."""
input_op = op.inputs[0].op
graph = ops.get_default_graph()
# pylint: disable=protected-access
op_ctxt = control_flow_util.GetOutputContext(input_op)
grad_ctxt = graph._get_control_flow_context()
# pylint: enable=protected-access
if isinstance(op_ctxt, WhileContext):
# pylint: disable=protected-access
return control_flow_ops._SwitchRefOrTensor(grad, grad_ctxt.pivot)
# pylint: enable=protected-access
elif isinstance(op_ctxt, CondContext):
pred = op_ctxt.pred
if grad_ctxt and grad_ctxt.grad_state:
# This Merge node is part of a cond within a loop.
# The backprop needs to have the value of this predicate for every
# iteration. So we must have its values accumulated in the forward, and
# use the accumulated values as the predicate for this backprop switch.
grad_state = grad_ctxt.grad_state
real_pred = grad_state.history_map.get(pred.name)
if real_pred is None:
# Remember the value of pred for every iteration.
grad_ctxt = grad_state.grad_context
grad_ctxt.Exit()
history_pred = grad_state.AddForwardAccumulator(pred)
grad_ctxt.Enter()
# Add the stack pop op. If pred.op is in a (outer) CondContext,
# the stack pop will be guarded with a switch.
real_pred = grad_state.AddBackpropAccumulatedValue(history_pred, pred)
grad_state.history_map[pred.name] = real_pred
pred = real_pred
# pylint: disable=protected-access
return control_flow_ops._SwitchRefOrTensor(grad, pred, name="cond_grad")
# pylint: enable=protected-access
else:
num_inputs = len(op.inputs)
cond = [math_ops.equal(op.outputs[1], i) for i in xrange(num_inputs)]
# pylint: disable=protected-access
return [control_flow_ops._SwitchRefOrTensor(grad, cond[i])[1]
for i in xrange(num_inputs)]
# pylint: enable=protected-access
@ops.RegisterGradient("RefMerge")
def _RefMergeGrad(op, grad, _):
return _MergeGrad(op, grad, _)
@ops.RegisterGradient("Exit")
def _ExitGrad(op, grad):
"""Gradients for an exit op are calculated using an Enter op."""
graph = ops.get_default_graph()
# pylint: disable=protected-access
op_ctxt = op._get_control_flow_context()
grad_ctxt = graph._get_control_flow_context()
# pylint: enable=protected-access
if not grad_ctxt.back_prop:
# The flag `back_prop` is set by users to suppress gradient
# computation for this loop. If the attribute `back_prop` is false,
# no gradient computation.
return None
if op_ctxt.grad_state:
raise TypeError("Second-order gradient for while loops not supported.")
if isinstance(grad, ops.Tensor):
grad_ctxt.AddName(grad.name)
else:
if not isinstance(grad, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(grad))
grad_ctxt.AddName(grad.values.name)
grad_ctxt.AddName(grad.indices.name)
dense_shape = grad.dense_shape
if dense_shape is not None:
grad_ctxt.AddName(dense_shape.name)
grad_ctxt.Enter()
# pylint: disable=protected-access
result = control_flow_ops._Enter(
grad, grad_ctxt.name, is_constant=False,
parallel_iterations=grad_ctxt.parallel_iterations,
name="b_exit")
# pylint: enable=protected-access
grad_ctxt.loop_enters.append(result)
grad_ctxt.Exit()
return result
ops.RegisterGradient("RefExit")(_ExitGrad)
@ops.RegisterGradient("NextIteration")
def _NextIterationGrad(_, grad):
"""A forward next_iteration is translated into a backprop identity.
Note that the backprop next_iteration is added in switch grad.
"""
return grad
@ops.RegisterGradient("RefNextIteration")
def _RefNextIterationGrad(_, grad):
return _NextIterationGrad(_, grad)
@ops.RegisterGradient("Enter")
def _EnterGrad(op, grad):
"""Gradients for an Enter are calculated using an Exit op.
For loop variables, grad is the gradient so just add an exit.
For loop invariants, we need to add an accumulator loop.
"""
graph = ops.get_default_graph()
# pylint: disable=protected-access
grad_ctxt = graph._get_control_flow_context()
# pylint: enable=protected-access
if not grad_ctxt.back_prop:
# Skip gradient computation, if the attribute `back_prop` is false.
return grad
if grad_ctxt.grad_state is None:
# Pass the gradient through if we are not in a gradient while context.
return grad
if op.get_attr("is_constant"):
# Add a gradient accumulator for each loop invariant.
if isinstance(grad, ops.Tensor):
result = grad_ctxt.AddBackpropAccumulator(op, grad)
elif isinstance(grad, ops.IndexedSlices):
result = grad_ctxt.AddBackpropIndexedSlicesAccumulator(op, grad)
else:
# TODO(yuanbyu, lukasr): Add support for SparseTensor.
raise TypeError("Type %s not supported" % type(grad))
else:
result = exit(grad)
grad_ctxt.loop_exits.append(result)
grad_ctxt.ExitResult([result])
return result
@ops.RegisterGradient("RefEnter")
def _RefEnterGrad(op, grad):
return _EnterGrad(op, grad)
@ops.RegisterGradient("LoopCond")
def _LoopCondGrad(_):
"""Stop backprop for the predicate of a while loop."""
return None
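# Illustrative sketch (not part of this module): the gradient functions
# registered above are what make backprop through a while loop work. Assumes
# TF1-style graph mode; the numbers are for demonstration only.
#
#     import tensorflow as tf
#     x = tf.constant(2.0)
#     # y = x**3, computed as three iterations of y *= x
#     _, y = tf.while_loop(lambda i, y: i < 3,
#                          lambda i, y: (i + 1, y * x),
#                          [tf.constant(0), tf.constant(1.0)])
#     dy_dx, = tf.gradients(y, x)  # exercises the Enter/Exit/Switch grads
#     with tf.Session() as sess:
#         print(sess.run(dy_dx))   # 3 * x**2 = 12.0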
| apache-2.0 |
eusi/MissionPlanerHM | Lib/site-packages/numpy/f2py/tests/test_callback.py | 59 | 1707 | from numpy.testing import *
from numpy import array
import math
import util
class TestF77Callback(util.F2PyTest):
code = """
subroutine t(fun,a)
integer a
cf2py intent(out) a
external fun
call fun(a)
end
subroutine func(a)
cf2py intent(in,out) a
integer a
a = a + 11
end
subroutine func0(a)
cf2py intent(out) a
integer a
a = 11
end
subroutine t2(a)
cf2py intent(callback) fun
integer a
cf2py intent(out) a
external fun
call fun(a)
end
"""
@dec.slow
def test_all(self):
for name in "t,t2".split(","):
self.check_function(name)
def check_function(self, name):
t = getattr(self.module, name)
r = t(lambda : 4)
assert r==4,`r`
r = t(lambda a:5,fun_extra_args=(6,))
assert r==5,`r`
r = t(lambda a:a,fun_extra_args=(6,))
assert r==6,`r`
r = t(lambda a:5+a,fun_extra_args=(7,))
assert r==12,`r`
r = t(lambda a:math.degrees(a),fun_extra_args=(math.pi,))
assert r==180,`r`
r = t(math.degrees,fun_extra_args=(math.pi,))
assert r==180,`r`
r = t(self.module.func, fun_extra_args=(6,))
assert r==17,`r`
r = t(self.module.func0)
assert r==11,`r`
r = t(self.module.func0._cpointer)
assert r==11,`r`
class A:
def __call__(self):
return 7
def mth(self):
return 9
a = A()
r = t(a)
assert r==7,`r`
r = t(a.mth)
assert r==9,`r`
if __name__ == "__main__":
import nose
nose.runmodule()
| gpl-3.0 |
ddki/my_study_project | language/python/frameworks/flask/venv/lib/python2.7/site-packages/setuptools/depends.py | 336 | 5837 | import sys
import imp
import marshal
from distutils.version import StrictVersion
from imp import PKG_DIRECTORY, PY_COMPILED, PY_SOURCE, PY_FROZEN
from .py33compat import Bytecode
__all__ = [
'Require', 'find_module', 'get_module_constant', 'extract_constant'
]
class Require:
"""A prerequisite to building or installing a distribution"""
def __init__(self, name, requested_version, module, homepage='',
attribute=None, format=None):
if format is None and requested_version is not None:
format = StrictVersion
if format is not None:
requested_version = format(requested_version)
if attribute is None:
attribute = '__version__'
self.__dict__.update(locals())
del self.self
def full_name(self):
"""Return full package/distribution name, w/version"""
if self.requested_version is not None:
return '%s-%s' % (self.name, self.requested_version)
return self.name
def version_ok(self, version):
"""Is 'version' sufficiently up-to-date?"""
return self.attribute is None or self.format is None or \
str(version) != "unknown" and version >= self.requested_version
def get_version(self, paths=None, default="unknown"):
"""Get version number of installed module, 'None', or 'default'
Search 'paths' for module. If not found, return 'None'. If found,
return the extracted version attribute, or 'default' if no version
attribute was specified, or the value cannot be determined without
importing the module. The version is formatted according to the
requirement's version format (if any), unless it is 'None' or the
supplied 'default'.
"""
if self.attribute is None:
try:
f, p, i = find_module(self.module, paths)
if f:
f.close()
return default
except ImportError:
return None
v = get_module_constant(self.module, self.attribute, default, paths)
if v is not None and v is not default and self.format is not None:
return self.format(v)
return v
def is_present(self, paths=None):
"""Return true if dependency is present on 'paths'"""
return self.get_version(paths) is not None
def is_current(self, paths=None):
"""Return true if dependency is present and up-to-date on 'paths'"""
version = self.get_version(paths)
if version is None:
return False
return self.version_ok(version)
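# Illustrative sketch (not part of the original module): describing and
# checking a dependency. The version numbers are assumptions.
#
#     req = Require('Distutils', '1.0.3', 'distutils')
#     req.full_name()    # -> 'Distutils-1.0.3'
#     req.is_present()   # True if distutils can be found on sys.path
#     req.is_current()   # additionally checks its __version__ against 1.0.3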
def find_module(module, paths=None):
"""Just like 'imp.find_module()', but with package support"""
parts = module.split('.')
while parts:
part = parts.pop(0)
f, path, (suffix, mode, kind) = info = imp.find_module(part, paths)
if kind == PKG_DIRECTORY:
parts = parts or ['__init__']
paths = [path]
elif parts:
raise ImportError("Can't find %r in %s" % (parts, module))
return info
def get_module_constant(module, symbol, default=-1, paths=None):
"""Find 'module' by searching 'paths', and extract 'symbol'
Return 'None' if 'module' does not exist on 'paths', or it does not define
'symbol'. If the module defines 'symbol' as a constant, return the
constant. Otherwise, return 'default'."""
try:
f, path, (suffix, mode, kind) = find_module(module, paths)
except ImportError:
# Module doesn't exist
return None
try:
if kind == PY_COMPILED:
f.read(8) # skip magic & date
code = marshal.load(f)
elif kind == PY_FROZEN:
code = imp.get_frozen_object(module)
elif kind == PY_SOURCE:
code = compile(f.read(), path, 'exec')
else:
# Not something we can parse; we'll have to import it. :(
if module not in sys.modules:
imp.load_module(module, f, path, (suffix, mode, kind))
return getattr(sys.modules[module], symbol, None)
finally:
if f:
f.close()
return extract_constant(code, symbol, default)
def extract_constant(code, symbol, default=-1):
"""Extract the constant value of 'symbol' from 'code'
If the name 'symbol' is bound to a constant value by the Python code
object 'code', return that value. If 'symbol' is bound to an expression,
return 'default'. Otherwise, return 'None'.
Return value is based on the first assignment to 'symbol'. 'symbol' must
be a global, or at least a non-"fast" local in the code block. That is,
only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol'
must be present in 'code.co_names'.
"""
if symbol not in code.co_names:
# name's not there, can't possibly be an assignment
return None
name_idx = list(code.co_names).index(symbol)
STORE_NAME = 90
STORE_GLOBAL = 97
LOAD_CONST = 100
const = default
for byte_code in Bytecode(code):
op = byte_code.opcode
arg = byte_code.arg
if op == LOAD_CONST:
const = code.co_consts[arg]
elif arg == name_idx and (op == STORE_NAME or op == STORE_GLOBAL):
return const
else:
const = default
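# Worked example (illustrative, not part of the original module):
#
#     code = compile("__version__ = '1.2'", '<demo>', 'exec')
#     extract_constant(code, '__version__')   # -> '1.2'
#     extract_constant(code, 'other_name')    # -> None (never assigned)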
def _update_globals():
"""
Patch the globals to remove the objects not available on some platforms.
XXX it'd be better to test assertions about bytecode instead.
"""
if not sys.platform.startswith('java') and sys.platform != 'cli':
return
incompatible = 'extract_constant', 'get_module_constant'
for name in incompatible:
del globals()[name]
__all__.remove(name)
_update_globals()
| mit |
samuelmaudo/yepes | yepes/utils/unidecode/x0b6.py | 253 | 4996 | data = (
'ddyels', # 0x00
'ddyelt', # 0x01
'ddyelp', # 0x02
'ddyelh', # 0x03
'ddyem', # 0x04
'ddyeb', # 0x05
'ddyebs', # 0x06
'ddyes', # 0x07
'ddyess', # 0x08
'ddyeng', # 0x09
'ddyej', # 0x0a
'ddyec', # 0x0b
'ddyek', # 0x0c
'ddyet', # 0x0d
'ddyep', # 0x0e
'ddyeh', # 0x0f
'ddo', # 0x10
'ddog', # 0x11
'ddogg', # 0x12
'ddogs', # 0x13
'ddon', # 0x14
'ddonj', # 0x15
'ddonh', # 0x16
'ddod', # 0x17
'ddol', # 0x18
'ddolg', # 0x19
'ddolm', # 0x1a
'ddolb', # 0x1b
'ddols', # 0x1c
'ddolt', # 0x1d
'ddolp', # 0x1e
'ddolh', # 0x1f
'ddom', # 0x20
'ddob', # 0x21
'ddobs', # 0x22
'ddos', # 0x23
'ddoss', # 0x24
'ddong', # 0x25
'ddoj', # 0x26
'ddoc', # 0x27
'ddok', # 0x28
'ddot', # 0x29
'ddop', # 0x2a
'ddoh', # 0x2b
'ddwa', # 0x2c
'ddwag', # 0x2d
'ddwagg', # 0x2e
'ddwags', # 0x2f
'ddwan', # 0x30
'ddwanj', # 0x31
'ddwanh', # 0x32
'ddwad', # 0x33
'ddwal', # 0x34
'ddwalg', # 0x35
'ddwalm', # 0x36
'ddwalb', # 0x37
'ddwals', # 0x38
'ddwalt', # 0x39
'ddwalp', # 0x3a
'ddwalh', # 0x3b
'ddwam', # 0x3c
'ddwab', # 0x3d
'ddwabs', # 0x3e
'ddwas', # 0x3f
'ddwass', # 0x40
'ddwang', # 0x41
'ddwaj', # 0x42
'ddwac', # 0x43
'ddwak', # 0x44
'ddwat', # 0x45
'ddwap', # 0x46
'ddwah', # 0x47
'ddwae', # 0x48
'ddwaeg', # 0x49
'ddwaegg', # 0x4a
'ddwaegs', # 0x4b
'ddwaen', # 0x4c
'ddwaenj', # 0x4d
'ddwaenh', # 0x4e
'ddwaed', # 0x4f
'ddwael', # 0x50
'ddwaelg', # 0x51
'ddwaelm', # 0x52
'ddwaelb', # 0x53
'ddwaels', # 0x54
'ddwaelt', # 0x55
'ddwaelp', # 0x56
'ddwaelh', # 0x57
'ddwaem', # 0x58
'ddwaeb', # 0x59
'ddwaebs', # 0x5a
'ddwaes', # 0x5b
'ddwaess', # 0x5c
'ddwaeng', # 0x5d
'ddwaej', # 0x5e
'ddwaec', # 0x5f
'ddwaek', # 0x60
'ddwaet', # 0x61
'ddwaep', # 0x62
'ddwaeh', # 0x63
'ddoe', # 0x64
'ddoeg', # 0x65
'ddoegg', # 0x66
'ddoegs', # 0x67
'ddoen', # 0x68
'ddoenj', # 0x69
'ddoenh', # 0x6a
'ddoed', # 0x6b
'ddoel', # 0x6c
'ddoelg', # 0x6d
'ddoelm', # 0x6e
'ddoelb', # 0x6f
'ddoels', # 0x70
'ddoelt', # 0x71
'ddoelp', # 0x72
'ddoelh', # 0x73
'ddoem', # 0x74
'ddoeb', # 0x75
'ddoebs', # 0x76
'ddoes', # 0x77
'ddoess', # 0x78
'ddoeng', # 0x79
'ddoej', # 0x7a
'ddoec', # 0x7b
'ddoek', # 0x7c
'ddoet', # 0x7d
'ddoep', # 0x7e
'ddoeh', # 0x7f
'ddyo', # 0x80
'ddyog', # 0x81
'ddyogg', # 0x82
'ddyogs', # 0x83
'ddyon', # 0x84
'ddyonj', # 0x85
'ddyonh', # 0x86
'ddyod', # 0x87
'ddyol', # 0x88
'ddyolg', # 0x89
'ddyolm', # 0x8a
'ddyolb', # 0x8b
'ddyols', # 0x8c
'ddyolt', # 0x8d
'ddyolp', # 0x8e
'ddyolh', # 0x8f
'ddyom', # 0x90
'ddyob', # 0x91
'ddyobs', # 0x92
'ddyos', # 0x93
'ddyoss', # 0x94
'ddyong', # 0x95
'ddyoj', # 0x96
'ddyoc', # 0x97
'ddyok', # 0x98
'ddyot', # 0x99
'ddyop', # 0x9a
'ddyoh', # 0x9b
'ddu', # 0x9c
'ddug', # 0x9d
'ddugg', # 0x9e
'ddugs', # 0x9f
'ddun', # 0xa0
'ddunj', # 0xa1
'ddunh', # 0xa2
'ddud', # 0xa3
'ddul', # 0xa4
'ddulg', # 0xa5
'ddulm', # 0xa6
'ddulb', # 0xa7
'dduls', # 0xa8
'ddult', # 0xa9
'ddulp', # 0xaa
'ddulh', # 0xab
'ddum', # 0xac
'ddub', # 0xad
'ddubs', # 0xae
'ddus', # 0xaf
'dduss', # 0xb0
'ddung', # 0xb1
'dduj', # 0xb2
'dduc', # 0xb3
'dduk', # 0xb4
'ddut', # 0xb5
'ddup', # 0xb6
'dduh', # 0xb7
'ddweo', # 0xb8
'ddweog', # 0xb9
'ddweogg', # 0xba
'ddweogs', # 0xbb
'ddweon', # 0xbc
'ddweonj', # 0xbd
'ddweonh', # 0xbe
'ddweod', # 0xbf
'ddweol', # 0xc0
'ddweolg', # 0xc1
'ddweolm', # 0xc2
'ddweolb', # 0xc3
'ddweols', # 0xc4
'ddweolt', # 0xc5
'ddweolp', # 0xc6
'ddweolh', # 0xc7
'ddweom', # 0xc8
'ddweob', # 0xc9
'ddweobs', # 0xca
'ddweos', # 0xcb
'ddweoss', # 0xcc
'ddweong', # 0xcd
'ddweoj', # 0xce
'ddweoc', # 0xcf
'ddweok', # 0xd0
'ddweot', # 0xd1
'ddweop', # 0xd2
'ddweoh', # 0xd3
'ddwe', # 0xd4
'ddweg', # 0xd5
'ddwegg', # 0xd6
'ddwegs', # 0xd7
'ddwen', # 0xd8
'ddwenj', # 0xd9
'ddwenh', # 0xda
'ddwed', # 0xdb
'ddwel', # 0xdc
'ddwelg', # 0xdd
'ddwelm', # 0xde
'ddwelb', # 0xdf
'ddwels', # 0xe0
'ddwelt', # 0xe1
'ddwelp', # 0xe2
'ddwelh', # 0xe3
'ddwem', # 0xe4
'ddweb', # 0xe5
'ddwebs', # 0xe6
'ddwes', # 0xe7
'ddwess', # 0xe8
'ddweng', # 0xe9
'ddwej', # 0xea
'ddwec', # 0xeb
'ddwek', # 0xec
'ddwet', # 0xed
'ddwep', # 0xee
'ddweh', # 0xef
'ddwi', # 0xf0
'ddwig', # 0xf1
'ddwigg', # 0xf2
'ddwigs', # 0xf3
'ddwin', # 0xf4
'ddwinj', # 0xf5
'ddwinh', # 0xf6
'ddwid', # 0xf7
'ddwil', # 0xf8
'ddwilg', # 0xf9
'ddwilm', # 0xfa
'ddwilb', # 0xfb
'ddwils', # 0xfc
'ddwilt', # 0xfd
'ddwilp', # 0xfe
'ddwilh', # 0xff
)
| bsd-3-clause |
arista-eosplus/ansible-modules-extras | cloud/webfaction/webfaction_app.py | 75 | 6195 | #!/usr/bin/python
#
# Create a Webfaction application using Ansible and the Webfaction API
#
# Valid application types can be found by looking here:
# http://docs.webfaction.com/xmlrpc-api/apps.html#application-types
#
# ------------------------------------------
#
# (c) Quentin Stafford-Fraser 2015, with contributions gratefully acknowledged from:
# * Andy Baker
# * Federico Tarantini
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: webfaction_app
short_description: Add or remove applications on a Webfaction host
description:
- Add or remove applications on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction.
author: Quentin Stafford-Fraser (@quentinsf)
version_added: "2.0"
notes:
- "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays."
- See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info.
options:
name:
description:
- The name of the application
required: true
state:
description:
- Whether the application should exist
required: false
choices: ['present', 'absent']
default: "present"
type:
description:
- The type of application to create. See the Webfaction docs at http://docs.webfaction.com/xmlrpc-api/apps.html for a list.
required: true
autostart:
description:
- Whether the app should restart with an autostart.cgi script
required: false
default: "no"
extra_info:
description:
- Any extra parameters required by the app
required: false
default: null
port_open:
description:
- Whether the app's port should be opened (only applies to app types that listen on a port)
required: false
default: false
login_name:
description:
- The webfaction account to use
required: true
login_password:
description:
- The webfaction password to use
required: true
machine:
description:
- The machine name to use (optional for accounts with only one machine)
required: false
'''
EXAMPLES = '''
- name: Create a test app
webfaction_app:
name="my_wsgi_app1"
state=present
type=mod_wsgi35-python27
login_name={{webfaction_user}}
login_password={{webfaction_passwd}}
machine={{webfaction_machine}}
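# Removal example (illustrative; note that this module still requires the type parameter even when state=absent):
- name: Remove the test app
webfaction_app:
name="my_wsgi_app1"
state=absent
type=mod_wsgi35-python27
login_name={{webfaction_user}}
login_password={{webfaction_passwd}}
machine={{webfaction_machine}}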
'''
import xmlrpclib
webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True),
state = dict(required=False, choices=['present', 'absent'], default='present'),
type = dict(required=True),
autostart = dict(required=False, choices=BOOLEANS, default=False),
extra_info = dict(required=False, default=""),
port_open = dict(required=False, choices=BOOLEANS, default=False),
login_name = dict(required=True),
login_password = dict(required=True),
machine = dict(required=False, default=False),
),
supports_check_mode=True
)
app_name = module.params['name']
app_type = module.params['type']
app_state = module.params['state']
if module.params['machine']:
session_id, account = webfaction.login(
module.params['login_name'],
module.params['login_password'],
module.params['machine']
)
else:
session_id, account = webfaction.login(
module.params['login_name'],
module.params['login_password']
)
app_list = webfaction.list_apps(session_id)
app_map = dict([(i['name'], i) for i in app_list])
existing_app = app_map.get(app_name)
result = {}
# Here's where the real stuff happens
if app_state == 'present':
# Does an app with this name already exist?
if existing_app:
if existing_app['type'] != app_type:
module.fail_json(msg="App already exists with different type. Please fix by hand.")
# If it exists with the right type, we don't change it
# Should check other parameters.
module.exit_json(
changed = False,
)
if not module.check_mode:
# If this isn't a dry run, create the app
result.update(
webfaction.create_app(
session_id, app_name, app_type,
module.boolean(module.params['autostart']),
module.params['extra_info'],
module.boolean(module.params['port_open'])
)
)
elif app_state == 'absent':
# If the app's already not there, nothing changed.
if not existing_app:
module.exit_json(
changed = False,
)
if not module.check_mode:
# If this isn't a dry run, delete the app
result.update(
webfaction.delete_app(session_id, app_name)
)
else:
module.fail_json(msg="Unknown state specified: {}".format(app_state))
module.exit_json(
changed = True,
result = result
)
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
Chibin/gpdb | gpMgmt/bin/gppylib/testold/testDriver.py | 4 | 7987 | import sys
from gppylib.gplog import *
from gppylib.system.configurationInterface import *
from gppylib.system import configurationImplTest, fileSystemImplTest, fileSystemInterface, osInterface, osImplTest, \
faultProberInterface, faultProberImplTest
from gppylib.gparray import Segment
logger = get_default_logger()
class TestDriver:
def __init__(self):
self.__configurationProvider = None
self.__fileSystemProvider = None
pass
#
#
# To get segmentData you can run this query against a 3.4 database:
#
# SELECT dbid, content, role, preferred_role, mode, status, hostname, address, port,
# fselocation AS datadir, replication_port
# FROM pg_catalog.gp_segment_configuration
# JOIN pg_catalog.pg_filespace_entry ON (dbid = fsedbid)
# JOIN pg_catalog.pg_filespace fs ON (fsefsoid = fs.oid AND fsname='pg_system')
# ORDER BY content, preferred_role DESC
#
# Include the header in what you paste!
#
#
def setSegments(self, segmentData):
lines = segmentData.strip().split("\n")
assert len(lines[1].split("+")) == len(lines[0].split("|")) # verify header is listed
self.__configurationProvider = configurationImplTest.GpConfigurationProviderForTesting()
for line in lines[2:len(lines)]:
row = [s.strip() for s in line.strip().split("|")]
dbId = int(row[0])
contentId = int(row[1])
role = row[2]
preferredRole = row[3]
mode = row[4]
status = row[5]
hostName = row[6]
address = row[7]
port = int(row[8])
dataDirectory = row[9]
replicationPort = None if row[10] == "" else int(row[10])
segment = Segment(content=contentId,
preferred_role=preferredRole,
dbid=dbId,
role=role,
mode=mode,
status=status,
hostname=hostName,
address=address,
port=port,
datadir=dataDirectory,
replicationPort=replicationPort)
self.__configurationProvider.addTestSegment(segment)
registerConfigurationProvider( self.__configurationProvider )
self.__fileSystemProvider = fileSystemImplTest.GpFileSystemProviderForTest()
fileSystemInterface.registerFileSystemProvider(self.__fileSystemProvider)
osInterface.registerOsProvider(osImplTest.GpOsProviderForTest())
faultProberInterface.registerFaultProber(faultProberImplTest.GpFaultProberImplForTest())
pass
def getFileSystem(self):
return self.__fileSystemProvider
def getConfiguration(self):
return self.__configurationProvider
def initOneHostConfiguration(self):
configStr = \
"""
dbid | content | role | preferred_role | mode | status | hostname | address | port | datadir | replication_port
------+---------+------+----------------+------+--------+--------------------+--------------------+-------+-------------------------------------------------+------------------
1 | -1 | p | p | s | u | this-is-my-host | this-is-my-host | 5432 |/datadirpathdbmaster/gp-1 |
2 | 0 | p | p | s | u | this-is-my-host | this-is-my-host | 50001 |/datadirpathdbfast1/gp0 | 55001
4 | 0 | m | m | s | u | this-is-my-host | this-is-my-host | 60001 |/datadirpathdbfast3/gp0 | 65001
3 | 1 | p | p | s | u | this-is-my-host | this-is-my-host | 50002 |/datadirpathdbfast2/gp1 | 55002
5 | 1 | m | m | s | u | this-is-my-host | this-is-my-host | 60002 |/datadirpathdbfast4/gp1 | 65002
"""
self.setSegments(configStr)
return self
def initTwoSegmentOneFailedMirrorConfiguration(self):
configStr = \
"""
dbid | content | role | preferred_role | mode | status | hostname | address | port | datadir | replication_port
------+---------+------+----------------+------+--------+--------------------+--------------------+-------+-------------------------------------------------+------------------
1 | -1 | p | p | s | u | master-host | primary-host | 5432 |/datadirpathdbmaster/gp-1 |
2 | 0 | p | p | s | u | first-host | first-host | 50001 |/datadirpathdbfast1/gp0 | 55001
7 | 0 | m | m | s | u | second-host | second-host | 40001 |/second/datadirpathdbfast3/gp0 | 45001
3 | 1 | p | p | s | u | first-host | first-host | 50002 |/datadirpathdbfast2/gp1 | 55002
9 | 1 | m | m | s | u | second-host | second-host | 40002 |/second/datadirpathdbfast4/gp1 | 45002
4 | 2 | m | m | s | u | first-host | first-host | 60001 |/datadirpathdbfast3/gp0 | 65001
6 | 2 | p | p | s | u | second-host | second-host | 30001 |/second/datadirpathdbfast1/gp0 | 35001
5 | 3 | m | m | c | d | first-host | first-host | 60002 |/datadirpathdbfast4/gp1 | 65002
8 | 3 | p | p | c | u | second-host | second-host | 30002 |/second/datadirpathdbfast2/gp1 | 35002
"""
self.setSegments(configStr)
return self
def initThreeHostMultiHomeNoMirrors(self):
configStr = \
"""
dbid | content | role | preferred_role | mode | status | hostname | address | port | datadir | replication_port
------+---------+------+----------------+------+--------+--------------------+--------------------+-------+-------------------------------------------------+------------------
1 | -1 | p | p | s | u | master-host | primary-host | 5432 |/datadirpathdbmaster/gp-1 |
2 | 0 | p | p | s | u | first-host | first-host-1 | 50001 |/first/datadirpathdbfast1/gp0 |
3 | 1 | p | p | s | u | first-host | first-host-2 | 50002 |/first/datadirpathdbfast2/gp1 |
4 | 2 | p | p | s | u | second-host | second-host-1 | 50001 |/second/datadirpathdbfast1/gp2 |
5 | 3 | p | p | s | u | second-host | second-host-2 | 50002 |/second/datadirpathdbfast2/gp3 |
6 | 4 | p | p | s | u | third-host | third-host-1 | 50001 |/third/datadirpathdbfast2/gp4 |
7 | 5 | p | p | s | u | third-host | third-host-2 | 50002 |/third/datadirpathdbfast2/gp5 |
"""
self.setSegments(configStr)
return self
| apache-2.0 |
lidiamcfreitas/FenixScheduleMaker | ScheduleMaker/brython/www/src/Lib/encodings/cp875.py | 37 | 13161 | """ Python Character Mapping Codec cp875 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP875.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp875',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x9c' # 0x04 -> CONTROL
'\t' # 0x05 -> HORIZONTAL TABULATION
'\x86' # 0x06 -> CONTROL
'\x7f' # 0x07 -> DELETE
'\x97' # 0x08 -> CONTROL
'\x8d' # 0x09 -> CONTROL
'\x8e' # 0x0A -> CONTROL
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x9d' # 0x14 -> CONTROL
'\x85' # 0x15 -> CONTROL
'\x08' # 0x16 -> BACKSPACE
'\x87' # 0x17 -> CONTROL
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x92' # 0x1A -> CONTROL
'\x8f' # 0x1B -> CONTROL
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
'\x80' # 0x20 -> CONTROL
'\x81' # 0x21 -> CONTROL
'\x82' # 0x22 -> CONTROL
'\x83' # 0x23 -> CONTROL
'\x84' # 0x24 -> CONTROL
'\n' # 0x25 -> LINE FEED
'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
'\x1b' # 0x27 -> ESCAPE
'\x88' # 0x28 -> CONTROL
'\x89' # 0x29 -> CONTROL
'\x8a' # 0x2A -> CONTROL
'\x8b' # 0x2B -> CONTROL
'\x8c' # 0x2C -> CONTROL
'\x05' # 0x2D -> ENQUIRY
'\x06' # 0x2E -> ACKNOWLEDGE
'\x07' # 0x2F -> BELL
'\x90' # 0x30 -> CONTROL
'\x91' # 0x31 -> CONTROL
'\x16' # 0x32 -> SYNCHRONOUS IDLE
'\x93' # 0x33 -> CONTROL
'\x94' # 0x34 -> CONTROL
'\x95' # 0x35 -> CONTROL
'\x96' # 0x36 -> CONTROL
'\x04' # 0x37 -> END OF TRANSMISSION
'\x98' # 0x38 -> CONTROL
'\x99' # 0x39 -> CONTROL
'\x9a' # 0x3A -> CONTROL
'\x9b' # 0x3B -> CONTROL
'\x14' # 0x3C -> DEVICE CONTROL FOUR
'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
'\x9e' # 0x3E -> CONTROL
'\x1a' # 0x3F -> SUBSTITUTE
' ' # 0x40 -> SPACE
'\u0391' # 0x41 -> GREEK CAPITAL LETTER ALPHA
'\u0392' # 0x42 -> GREEK CAPITAL LETTER BETA
'\u0393' # 0x43 -> GREEK CAPITAL LETTER GAMMA
'\u0394' # 0x44 -> GREEK CAPITAL LETTER DELTA
'\u0395' # 0x45 -> GREEK CAPITAL LETTER EPSILON
'\u0396' # 0x46 -> GREEK CAPITAL LETTER ZETA
'\u0397' # 0x47 -> GREEK CAPITAL LETTER ETA
'\u0398' # 0x48 -> GREEK CAPITAL LETTER THETA
'\u0399' # 0x49 -> GREEK CAPITAL LETTER IOTA
'[' # 0x4A -> LEFT SQUARE BRACKET
'.' # 0x4B -> FULL STOP
'<' # 0x4C -> LESS-THAN SIGN
'(' # 0x4D -> LEFT PARENTHESIS
'+' # 0x4E -> PLUS SIGN
'!' # 0x4F -> EXCLAMATION MARK
'&' # 0x50 -> AMPERSAND
'\u039a' # 0x51 -> GREEK CAPITAL LETTER KAPPA
'\u039b' # 0x52 -> GREEK CAPITAL LETTER LAMDA
'\u039c' # 0x53 -> GREEK CAPITAL LETTER MU
'\u039d' # 0x54 -> GREEK CAPITAL LETTER NU
'\u039e' # 0x55 -> GREEK CAPITAL LETTER XI
'\u039f' # 0x56 -> GREEK CAPITAL LETTER OMICRON
'\u03a0' # 0x57 -> GREEK CAPITAL LETTER PI
'\u03a1' # 0x58 -> GREEK CAPITAL LETTER RHO
'\u03a3' # 0x59 -> GREEK CAPITAL LETTER SIGMA
']' # 0x5A -> RIGHT SQUARE BRACKET
'$' # 0x5B -> DOLLAR SIGN
'*' # 0x5C -> ASTERISK
')' # 0x5D -> RIGHT PARENTHESIS
';' # 0x5E -> SEMICOLON
'^' # 0x5F -> CIRCUMFLEX ACCENT
'-' # 0x60 -> HYPHEN-MINUS
'/' # 0x61 -> SOLIDUS
'\u03a4' # 0x62 -> GREEK CAPITAL LETTER TAU
'\u03a5' # 0x63 -> GREEK CAPITAL LETTER UPSILON
'\u03a6' # 0x64 -> GREEK CAPITAL LETTER PHI
'\u03a7' # 0x65 -> GREEK CAPITAL LETTER CHI
'\u03a8' # 0x66 -> GREEK CAPITAL LETTER PSI
'\u03a9' # 0x67 -> GREEK CAPITAL LETTER OMEGA
'\u03aa' # 0x68 -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
'\u03ab' # 0x69 -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
'|' # 0x6A -> VERTICAL LINE
',' # 0x6B -> COMMA
'%' # 0x6C -> PERCENT SIGN
'_' # 0x6D -> LOW LINE
'>' # 0x6E -> GREATER-THAN SIGN
'?' # 0x6F -> QUESTION MARK
'\xa8' # 0x70 -> DIAERESIS
'\u0386' # 0x71 -> GREEK CAPITAL LETTER ALPHA WITH TONOS
'\u0388' # 0x72 -> GREEK CAPITAL LETTER EPSILON WITH TONOS
'\u0389' # 0x73 -> GREEK CAPITAL LETTER ETA WITH TONOS
'\xa0' # 0x74 -> NO-BREAK SPACE
'\u038a' # 0x75 -> GREEK CAPITAL LETTER IOTA WITH TONOS
'\u038c' # 0x76 -> GREEK CAPITAL LETTER OMICRON WITH TONOS
'\u038e' # 0x77 -> GREEK CAPITAL LETTER UPSILON WITH TONOS
'\u038f' # 0x78 -> GREEK CAPITAL LETTER OMEGA WITH TONOS
'`' # 0x79 -> GRAVE ACCENT
':' # 0x7A -> COLON
'#' # 0x7B -> NUMBER SIGN
'@' # 0x7C -> COMMERCIAL AT
"'" # 0x7D -> APOSTROPHE
'=' # 0x7E -> EQUALS SIGN
'"' # 0x7F -> QUOTATION MARK
'\u0385' # 0x80 -> GREEK DIALYTIKA TONOS
'a' # 0x81 -> LATIN SMALL LETTER A
'b' # 0x82 -> LATIN SMALL LETTER B
'c' # 0x83 -> LATIN SMALL LETTER C
'd' # 0x84 -> LATIN SMALL LETTER D
'e' # 0x85 -> LATIN SMALL LETTER E
'f' # 0x86 -> LATIN SMALL LETTER F
'g' # 0x87 -> LATIN SMALL LETTER G
'h' # 0x88 -> LATIN SMALL LETTER H
'i' # 0x89 -> LATIN SMALL LETTER I
'\u03b1' # 0x8A -> GREEK SMALL LETTER ALPHA
'\u03b2' # 0x8B -> GREEK SMALL LETTER BETA
'\u03b3' # 0x8C -> GREEK SMALL LETTER GAMMA
'\u03b4' # 0x8D -> GREEK SMALL LETTER DELTA
'\u03b5' # 0x8E -> GREEK SMALL LETTER EPSILON
'\u03b6' # 0x8F -> GREEK SMALL LETTER ZETA
'\xb0' # 0x90 -> DEGREE SIGN
'j' # 0x91 -> LATIN SMALL LETTER J
'k' # 0x92 -> LATIN SMALL LETTER K
'l' # 0x93 -> LATIN SMALL LETTER L
'm' # 0x94 -> LATIN SMALL LETTER M
'n' # 0x95 -> LATIN SMALL LETTER N
'o' # 0x96 -> LATIN SMALL LETTER O
'p' # 0x97 -> LATIN SMALL LETTER P
'q' # 0x98 -> LATIN SMALL LETTER Q
'r' # 0x99 -> LATIN SMALL LETTER R
'\u03b7' # 0x9A -> GREEK SMALL LETTER ETA
'\u03b8' # 0x9B -> GREEK SMALL LETTER THETA
'\u03b9' # 0x9C -> GREEK SMALL LETTER IOTA
'\u03ba' # 0x9D -> GREEK SMALL LETTER KAPPA
'\u03bb' # 0x9E -> GREEK SMALL LETTER LAMDA
'\u03bc' # 0x9F -> GREEK SMALL LETTER MU
'\xb4' # 0xA0 -> ACUTE ACCENT
'~' # 0xA1 -> TILDE
's' # 0xA2 -> LATIN SMALL LETTER S
't' # 0xA3 -> LATIN SMALL LETTER T
'u' # 0xA4 -> LATIN SMALL LETTER U
'v' # 0xA5 -> LATIN SMALL LETTER V
'w' # 0xA6 -> LATIN SMALL LETTER W
'x' # 0xA7 -> LATIN SMALL LETTER X
'y' # 0xA8 -> LATIN SMALL LETTER Y
'z' # 0xA9 -> LATIN SMALL LETTER Z
'\u03bd' # 0xAA -> GREEK SMALL LETTER NU
'\u03be' # 0xAB -> GREEK SMALL LETTER XI
'\u03bf' # 0xAC -> GREEK SMALL LETTER OMICRON
'\u03c0' # 0xAD -> GREEK SMALL LETTER PI
'\u03c1' # 0xAE -> GREEK SMALL LETTER RHO
'\u03c3' # 0xAF -> GREEK SMALL LETTER SIGMA
'\xa3' # 0xB0 -> POUND SIGN
'\u03ac' # 0xB1 -> GREEK SMALL LETTER ALPHA WITH TONOS
'\u03ad' # 0xB2 -> GREEK SMALL LETTER EPSILON WITH TONOS
'\u03ae' # 0xB3 -> GREEK SMALL LETTER ETA WITH TONOS
'\u03ca' # 0xB4 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
'\u03af' # 0xB5 -> GREEK SMALL LETTER IOTA WITH TONOS
'\u03cc' # 0xB6 -> GREEK SMALL LETTER OMICRON WITH TONOS
'\u03cd' # 0xB7 -> GREEK SMALL LETTER UPSILON WITH TONOS
'\u03cb' # 0xB8 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
'\u03ce' # 0xB9 -> GREEK SMALL LETTER OMEGA WITH TONOS
'\u03c2' # 0xBA -> GREEK SMALL LETTER FINAL SIGMA
'\u03c4' # 0xBB -> GREEK SMALL LETTER TAU
'\u03c5' # 0xBC -> GREEK SMALL LETTER UPSILON
'\u03c6' # 0xBD -> GREEK SMALL LETTER PHI
'\u03c7' # 0xBE -> GREEK SMALL LETTER CHI
'\u03c8' # 0xBF -> GREEK SMALL LETTER PSI
'{' # 0xC0 -> LEFT CURLY BRACKET
'A' # 0xC1 -> LATIN CAPITAL LETTER A
'B' # 0xC2 -> LATIN CAPITAL LETTER B
'C' # 0xC3 -> LATIN CAPITAL LETTER C
'D' # 0xC4 -> LATIN CAPITAL LETTER D
'E' # 0xC5 -> LATIN CAPITAL LETTER E
'F' # 0xC6 -> LATIN CAPITAL LETTER F
'G' # 0xC7 -> LATIN CAPITAL LETTER G
'H' # 0xC8 -> LATIN CAPITAL LETTER H
'I' # 0xC9 -> LATIN CAPITAL LETTER I
'\xad' # 0xCA -> SOFT HYPHEN
'\u03c9' # 0xCB -> GREEK SMALL LETTER OMEGA
'\u0390' # 0xCC -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
'\u03b0' # 0xCD -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
'\u2018' # 0xCE -> LEFT SINGLE QUOTATION MARK
'\u2015' # 0xCF -> HORIZONTAL BAR
'}' # 0xD0 -> RIGHT CURLY BRACKET
'J' # 0xD1 -> LATIN CAPITAL LETTER J
'K' # 0xD2 -> LATIN CAPITAL LETTER K
'L' # 0xD3 -> LATIN CAPITAL LETTER L
'M' # 0xD4 -> LATIN CAPITAL LETTER M
'N' # 0xD5 -> LATIN CAPITAL LETTER N
'O' # 0xD6 -> LATIN CAPITAL LETTER O
'P' # 0xD7 -> LATIN CAPITAL LETTER P
'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
'R' # 0xD9 -> LATIN CAPITAL LETTER R
'\xb1' # 0xDA -> PLUS-MINUS SIGN
'\xbd' # 0xDB -> VULGAR FRACTION ONE HALF
'\x1a' # 0xDC -> SUBSTITUTE
'\u0387' # 0xDD -> GREEK ANO TELEIA
'\u2019' # 0xDE -> RIGHT SINGLE QUOTATION MARK
'\xa6' # 0xDF -> BROKEN BAR
'\\' # 0xE0 -> REVERSE SOLIDUS
'\x1a' # 0xE1 -> SUBSTITUTE
'S' # 0xE2 -> LATIN CAPITAL LETTER S
'T' # 0xE3 -> LATIN CAPITAL LETTER T
'U' # 0xE4 -> LATIN CAPITAL LETTER U
'V' # 0xE5 -> LATIN CAPITAL LETTER V
'W' # 0xE6 -> LATIN CAPITAL LETTER W
'X' # 0xE7 -> LATIN CAPITAL LETTER X
'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
'\xb2' # 0xEA -> SUPERSCRIPT TWO
'\xa7' # 0xEB -> SECTION SIGN
'\x1a' # 0xEC -> SUBSTITUTE
'\x1a' # 0xED -> SUBSTITUTE
'\xab' # 0xEE -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xEF -> NOT SIGN
'0' # 0xF0 -> DIGIT ZERO
'1' # 0xF1 -> DIGIT ONE
'2' # 0xF2 -> DIGIT TWO
'3' # 0xF3 -> DIGIT THREE
'4' # 0xF4 -> DIGIT FOUR
'5' # 0xF5 -> DIGIT FIVE
'6' # 0xF6 -> DIGIT SIX
'7' # 0xF7 -> DIGIT SEVEN
'8' # 0xF8 -> DIGIT EIGHT
'9' # 0xF9 -> DIGIT NINE
'\xb3' # 0xFA -> SUPERSCRIPT THREE
'\xa9' # 0xFB -> COPYRIGHT SIGN
'\x1a' # 0xFC -> SUBSTITUTE
'\x1a' # 0xFD -> SUBSTITUTE
'\xbb' # 0xFE -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| bsd-2-clause |
etuna-SBF-kog/Stadsparken | env/lib/python2.7/site-packages/requests/packages/urllib3/contrib/pyopenssl.py | 102 | 5235 | '''SSL with SNI-support for Python 2.
This needs the following packages installed:
* pyOpenSSL (tested with 0.13)
* ndg-httpsclient (tested with 0.3.2)
* pyasn1 (tested with 0.1.6)
To activate it call :func:`~urllib3.contrib.pyopenssl.inject_into_urllib3`.
This can be done in a ``sitecustomize`` module, or at any other time before
your application begins using ``urllib3``, like this::
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
Now you can use :mod:`urllib3` as you normally would, and it will support SNI
when the required modules are installed.
'''
from ndg.httpsclient.ssl_peer_verification import (ServerSSLCertVerification,
SUBJ_ALT_NAME_SUPPORT)
from ndg.httpsclient.subj_alt_name import SubjectAltName
import OpenSSL.SSL
from pyasn1.codec.der import decoder as der_decoder
from socket import _fileobject
import ssl
from .. import connectionpool
from .. import util
__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
# SNI only *really* works if we can read the subjectAltName of certificates.
HAS_SNI = SUBJ_ALT_NAME_SUPPORT
# Map from urllib3 to PyOpenSSL compatible parameter-values.
_openssl_versions = {
ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD,
ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
}
_openssl_verify = {
ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
ssl.CERT_REQUIRED: OpenSSL.SSL.VERIFY_PEER
+ OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
}
orig_util_HAS_SNI = util.HAS_SNI
orig_connectionpool_ssl_wrap_socket = connectionpool.ssl_wrap_socket
def inject_into_urllib3():
'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.'
connectionpool.ssl_wrap_socket = ssl_wrap_socket
util.HAS_SNI = HAS_SNI
def extract_from_urllib3():
'Undo monkey-patching by :func:`inject_into_urllib3`.'
connectionpool.ssl_wrap_socket = orig_connectionpool_ssl_wrap_socket
util.HAS_SNI = orig_util_HAS_SNI
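# A round-trip sketch (assuming urllib3 is importable, as in the module
# docstring above):
#
#   inject_into_urllib3()    # HTTPS connections now go through PyOpenSSL
#   ...                      # use urllib3 as usual, with SNI support
#   extract_from_urllib3()   # restore the stock ssl-based implementation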
### Note: This is a slightly bug-fixed version of the same function from ndg-httpsclient.
def get_subj_alt_name(peer_cert):
# Search through extensions
dns_name = []
if not SUBJ_ALT_NAME_SUPPORT:
return dns_name
general_names = SubjectAltName()
for i in range(peer_cert.get_extension_count()):
ext = peer_cert.get_extension(i)
ext_name = ext.get_short_name()
if ext_name != 'subjectAltName':
continue
# PyOpenSSL returns extension data in ASN.1 encoded form
ext_dat = ext.get_data()
decoded_dat = der_decoder.decode(ext_dat,
asn1Spec=general_names)
for name in decoded_dat:
if not isinstance(name, SubjectAltName):
continue
for entry in range(len(name)):
component = name.getComponentByPosition(entry)
if component.getName() != 'dNSName':
continue
dns_name.append(str(component.getComponent()))
return dns_name
class WrappedSocket(object):
'''API-compatibility wrapper for Python OpenSSL's Connection-class.'''
def __init__(self, connection, socket):
self.connection = connection
self.socket = socket
def makefile(self, mode, bufsize=-1):
return _fileobject(self.connection, mode, bufsize)
def settimeout(self, timeout):
return self.socket.settimeout(timeout)
def sendall(self, data):
return self.connection.sendall(data)
def getpeercert(self, binary_form=False):
x509 = self.connection.get_peer_certificate()
if not x509:
raise ssl.SSLError('')
if binary_form:
return OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_ASN1,
x509)
return {
'subject': (
(('commonName', x509.get_subject().CN),),
),
'subjectAltName': [
('DNS', value)
for value in get_subj_alt_name(x509)
]
}
def _verify_callback(cnx, x509, err_no, err_depth, return_code):
return err_no == 0
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
ssl_version=None):
ctx = OpenSSL.SSL.Context(_openssl_versions[ssl_version])
if certfile:
ctx.use_certificate_file(certfile)
if keyfile:
ctx.use_privatekey_file(keyfile)
if cert_reqs != ssl.CERT_NONE:
ctx.set_verify(_openssl_verify[cert_reqs], _verify_callback)
if ca_certs:
try:
ctx.load_verify_locations(ca_certs, None)
except OpenSSL.SSL.Error as e:
raise ssl.SSLError('bad ca_certs: %r' % ca_certs, e)
cnx = OpenSSL.SSL.Connection(ctx, sock)
cnx.set_tlsext_host_name(server_hostname)
cnx.set_connect_state()
try:
cnx.do_handshake()
except OpenSSL.SSL.Error as e:
raise ssl.SSLError('bad handshake', e)
return WrappedSocket(cnx, sock)
| gpl-3.0 |
SnappleCap/oh-mainline | vendor/packages/Django/django/contrib/admin/validation.py | 100 | 20755 | from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.db.models.fields import FieldDoesNotExist
from django.forms.models import (BaseModelForm, BaseModelFormSet, fields_for_model,
_get_foreign_key)
from django.contrib.admin import ListFilter, FieldListFilter
from django.contrib.admin.util import get_fields_from_path, NotRelationField
from django.contrib.admin.options import (flatten_fieldsets, BaseModelAdmin,
ModelAdmin, HORIZONTAL, VERTICAL)
__all__ = ['validate']
def validate(cls, model):
"""
Does basic ModelAdmin option validation. Calls custom validation
classmethod in the end if it is provided in cls. The signature of the
custom validation classmethod should be: def validate(cls, model).
"""
# Before we can introspect models, they need to be fully loaded so that
# inter-relations are set up correctly. We force that here.
models.get_apps()
opts = model._meta
validate_base(cls, model)
# list_display
if hasattr(cls, 'list_display'):
check_isseq(cls, 'list_display', cls.list_display)
for idx, field in enumerate(cls.list_display):
if not callable(field):
if not hasattr(cls, field):
if not hasattr(model, field):
try:
opts.get_field(field)
except models.FieldDoesNotExist:
raise ImproperlyConfigured("%s.list_display[%d], %r is not a callable or an attribute of %r or found in the model %r."
% (cls.__name__, idx, field, cls.__name__, model._meta.object_name))
else:
# getattr(model, field) could be an X_RelatedObjectsDescriptor
f = fetch_attr(cls, model, opts, "list_display[%d]" % idx, field)
if isinstance(f, models.ManyToManyField):
raise ImproperlyConfigured("'%s.list_display[%d]', '%s' is a ManyToManyField which is not supported."
% (cls.__name__, idx, field))
# list_display_links
if hasattr(cls, 'list_display_links'):
check_isseq(cls, 'list_display_links', cls.list_display_links)
for idx, field in enumerate(cls.list_display_links):
if field not in cls.list_display:
raise ImproperlyConfigured("'%s.list_display_links[%d]' "
"refers to '%s' which is not defined in 'list_display'."
% (cls.__name__, idx, field))
# list_filter
if hasattr(cls, 'list_filter'):
check_isseq(cls, 'list_filter', cls.list_filter)
for idx, item in enumerate(cls.list_filter):
# There are three options for specifying a filter:
# 1: 'field' - a basic field filter, possibly w/ relationships (eg, 'field__rel')
# 2: ('field', SomeFieldListFilter) - a field-based list filter class
# 3: SomeListFilter - a non-field list filter class
if callable(item) and not isinstance(item, models.Field):
# If item is option 3, it should be a ListFilter...
if not issubclass(item, ListFilter):
raise ImproperlyConfigured("'%s.list_filter[%d]' is '%s'"
" which is not a descendant of ListFilter."
% (cls.__name__, idx, item.__name__))
# ... but not a FieldListFilter.
if issubclass(item, FieldListFilter):
raise ImproperlyConfigured("'%s.list_filter[%d]' is '%s'"
" which is of type FieldListFilter but is not"
" associated with a field name."
% (cls.__name__, idx, item.__name__))
else:
if isinstance(item, (tuple, list)):
# item is option #2
field, list_filter_class = item
if not issubclass(list_filter_class, FieldListFilter):
raise ImproperlyConfigured("'%s.list_filter[%d][1]'"
" is '%s' which is not of type FieldListFilter."
% (cls.__name__, idx, list_filter_class.__name__))
else:
# item is option #1
field = item
# Validate the field string
try:
get_fields_from_path(model, field)
except (NotRelationField, FieldDoesNotExist):
raise ImproperlyConfigured("'%s.list_filter[%d]' refers to '%s'"
" which does not refer to a Field."
% (cls.__name__, idx, field))
# list_per_page = 100
if hasattr(cls, 'list_per_page') and not isinstance(cls.list_per_page, int):
raise ImproperlyConfigured("'%s.list_per_page' should be a integer."
% cls.__name__)
# list_max_show_all
if hasattr(cls, 'list_max_show_all') and not isinstance(cls.list_max_show_all, int):
raise ImproperlyConfigured("'%s.list_max_show_all' should be an integer."
% cls.__name__)
# list_editable
if hasattr(cls, 'list_editable') and cls.list_editable:
check_isseq(cls, 'list_editable', cls.list_editable)
for idx, field_name in enumerate(cls.list_editable):
try:
field = opts.get_field_by_name(field_name)[0]
except models.FieldDoesNotExist:
raise ImproperlyConfigured("'%s.list_editable[%d]' refers to a "
"field, '%s', not defined on %s.%s."
% (cls.__name__, idx, field_name, model._meta.app_label, model.__name__))
if field_name not in cls.list_display:
raise ImproperlyConfigured("'%s.list_editable[%d]' refers to "
"'%s' which is not defined in 'list_display'."
% (cls.__name__, idx, field_name))
if field_name in cls.list_display_links:
raise ImproperlyConfigured("'%s' cannot be in both '%s.list_editable'"
" and '%s.list_display_links'"
% (field_name, cls.__name__, cls.__name__))
if not cls.list_display_links and cls.list_display[0] in cls.list_editable:
raise ImproperlyConfigured("'%s.list_editable[%d]' refers to"
" the first field in list_display, '%s', which can't be"
" used unless list_display_links is set."
% (cls.__name__, idx, cls.list_display[0]))
if not field.editable:
raise ImproperlyConfigured("'%s.list_editable[%d]' refers to a "
"field, '%s', which isn't editable through the admin."
% (cls.__name__, idx, field_name))
# search_fields = ()
if hasattr(cls, 'search_fields'):
check_isseq(cls, 'search_fields', cls.search_fields)
# date_hierarchy = None
if cls.date_hierarchy:
f = get_field(cls, model, opts, 'date_hierarchy', cls.date_hierarchy)
if not isinstance(f, (models.DateField, models.DateTimeField)):
raise ImproperlyConfigured("'%s.date_hierarchy is "
"neither an instance of DateField nor DateTimeField."
% cls.__name__)
# ordering = None
if cls.ordering:
check_isseq(cls, 'ordering', cls.ordering)
for idx, field in enumerate(cls.ordering):
if field == '?' and len(cls.ordering) != 1:
raise ImproperlyConfigured("'%s.ordering' has the random "
"ordering marker '?', but contains other fields as "
"well. Please either remove '?' or the other fields."
% cls.__name__)
if field == '?':
continue
if field.startswith('-'):
field = field[1:]
# Skip ordering in the format field1__field2 (FIXME: checking
# this format would be nice, but it's a little fiddly).
if '__' in field:
continue
get_field(cls, model, opts, 'ordering[%d]' % idx, field)
if hasattr(cls, "readonly_fields"):
check_readonly_fields(cls, model, opts)
# list_select_related = False
# save_as = False
# save_on_top = False
for attr in ('list_select_related', 'save_as', 'save_on_top'):
if not isinstance(getattr(cls, attr), bool):
raise ImproperlyConfigured("'%s.%s' should be a boolean."
% (cls.__name__, attr))
# inlines = []
if hasattr(cls, 'inlines'):
check_isseq(cls, 'inlines', cls.inlines)
for idx, inline in enumerate(cls.inlines):
if not issubclass(inline, BaseModelAdmin):
raise ImproperlyConfigured("'%s.inlines[%d]' does not inherit "
"from BaseModelAdmin." % (cls.__name__, idx))
if not inline.model:
raise ImproperlyConfigured("'model' is a required attribute "
"of '%s.inlines[%d]'." % (cls.__name__, idx))
if not issubclass(inline.model, models.Model):
raise ImproperlyConfigured("'%s.inlines[%d].model' does not "
"inherit from models.Model." % (cls.__name__, idx))
validate_base(inline, inline.model)
validate_inline(inline, cls, model)
def validate_inline(cls, parent, parent_model):
# model is already verified to exist and be a Model
if cls.fk_name: # default value is None
f = get_field(cls, cls.model, cls.model._meta, 'fk_name', cls.fk_name)
if not isinstance(f, models.ForeignKey):
raise ImproperlyConfigured("'%s.fk_name is not an instance of "
"models.ForeignKey." % cls.__name__)
fk = _get_foreign_key(parent_model, cls.model, fk_name=cls.fk_name, can_fail=True)
# extra = 3
if not isinstance(cls.extra, int):
raise ImproperlyConfigured("'%s.extra' should be a integer."
% cls.__name__)
# max_num = None
max_num = getattr(cls, 'max_num', None)
if max_num is not None and not isinstance(max_num, int):
raise ImproperlyConfigured("'%s.max_num' should be an integer or None (default)."
% cls.__name__)
# formset
if hasattr(cls, 'formset') and not issubclass(cls.formset, BaseModelFormSet):
raise ImproperlyConfigured("'%s.formset' does not inherit from "
"BaseModelFormSet." % cls.__name__)
# exclude
if hasattr(cls, 'exclude') and cls.exclude:
if fk and fk.name in cls.exclude:
raise ImproperlyConfigured("%s cannot exclude the field "
"'%s' - this is the foreign key to the parent model "
"%s.%s." % (cls.__name__, fk.name, parent_model._meta.app_label, parent_model.__name__))
if hasattr(cls, "readonly_fields"):
check_readonly_fields(cls, cls.model, cls.model._meta)
def validate_fields_spec(cls, model, opts, flds, label):
"""
Validate the fields specification in `flds` from a ModelAdmin subclass
`cls` for the `model` model. `opts` is `model`'s Meta inner class.
Use `label` for reporting problems to the user.
The fields specification can be a ``fields`` option or a ``fields``
sub-option from a ``fieldsets`` option component.
"""
for fields in flds:
# The entry in fields might be a tuple. If it is a standalone
# field, make it into a tuple to make processing easier.
if type(fields) != tuple:
fields = (fields,)
for field in fields:
if field in cls.readonly_fields:
# Stuff can be put in fields that isn't actually a
# model field if it's in readonly_fields,
# readonly_fields will handle the validation of such
# things.
continue
check_formfield(cls, model, opts, label, field)
try:
f = opts.get_field(field)
except models.FieldDoesNotExist:
# If we can't find a field on the model that matches, it could be an
# extra field on the form; nothing to check so move on to the next field.
continue
if isinstance(f, models.ManyToManyField) and not f.rel.through._meta.auto_created:
raise ImproperlyConfigured("'%s.%s' "
"can't include the ManyToManyField field '%s' because "
"'%s' manually specifies a 'through' model." % (
cls.__name__, label, field, field))
def validate_base(cls, model):
opts = model._meta
# raw_id_fields
if hasattr(cls, 'raw_id_fields'):
check_isseq(cls, 'raw_id_fields', cls.raw_id_fields)
for idx, field in enumerate(cls.raw_id_fields):
f = get_field(cls, model, opts, 'raw_id_fields', field)
if not isinstance(f, (models.ForeignKey, models.ManyToManyField)):
raise ImproperlyConfigured("'%s.raw_id_fields[%d]', '%s' must "
"be either a ForeignKey or ManyToManyField."
% (cls.__name__, idx, field))
# fields
if cls.fields: # default value is None
check_isseq(cls, 'fields', cls.fields)
validate_fields_spec(cls, model, opts, cls.fields, 'fields')
if cls.fieldsets:
raise ImproperlyConfigured('Both fieldsets and fields are specified in %s.' % cls.__name__)
if len(cls.fields) > len(set(cls.fields)):
raise ImproperlyConfigured('There are duplicate field(s) in %s.fields' % cls.__name__)
# fieldsets
if cls.fieldsets: # default value is None
check_isseq(cls, 'fieldsets', cls.fieldsets)
for idx, fieldset in enumerate(cls.fieldsets):
check_isseq(cls, 'fieldsets[%d]' % idx, fieldset)
if len(fieldset) != 2:
raise ImproperlyConfigured("'%s.fieldsets[%d]' does not "
"have exactly two elements." % (cls.__name__, idx))
check_isdict(cls, 'fieldsets[%d][1]' % idx, fieldset[1])
if 'fields' not in fieldset[1]:
raise ImproperlyConfigured("'fields' key is required in "
"%s.fieldsets[%d][1] field options dict."
% (cls.__name__, idx))
validate_fields_spec(cls, model, opts, fieldset[1]['fields'], "fieldsets[%d][1]['fields']" % idx)
flattened_fieldsets = flatten_fieldsets(cls.fieldsets)
if len(flattened_fieldsets) > len(set(flattened_fieldsets)):
raise ImproperlyConfigured('There are duplicate field(s) in %s.fieldsets' % cls.__name__)
# exclude
if cls.exclude: # default value is None
check_isseq(cls, 'exclude', cls.exclude)
for field in cls.exclude:
check_formfield(cls, model, opts, 'exclude', field)
try:
f = opts.get_field(field)
except models.FieldDoesNotExist:
# If we can't find a field on the model that matches,
# it could be an extra field on the form.
continue
if len(cls.exclude) > len(set(cls.exclude)):
raise ImproperlyConfigured('There are duplicate field(s) in %s.exclude' % cls.__name__)
# form
if hasattr(cls, 'form') and not issubclass(cls.form, BaseModelForm):
raise ImproperlyConfigured("%s.form does not inherit from "
"BaseModelForm." % cls.__name__)
# filter_vertical
if hasattr(cls, 'filter_vertical'):
check_isseq(cls, 'filter_vertical', cls.filter_vertical)
for idx, field in enumerate(cls.filter_vertical):
f = get_field(cls, model, opts, 'filter_vertical', field)
if not isinstance(f, models.ManyToManyField):
raise ImproperlyConfigured("'%s.filter_vertical[%d]' must be "
"a ManyToManyField." % (cls.__name__, idx))
# filter_horizontal
if hasattr(cls, 'filter_horizontal'):
check_isseq(cls, 'filter_horizontal', cls.filter_horizontal)
for idx, field in enumerate(cls.filter_horizontal):
f = get_field(cls, model, opts, 'filter_horizontal', field)
if not isinstance(f, models.ManyToManyField):
raise ImproperlyConfigured("'%s.filter_horizontal[%d]' must be "
"a ManyToManyField." % (cls.__name__, idx))
# radio_fields
if hasattr(cls, 'radio_fields'):
check_isdict(cls, 'radio_fields', cls.radio_fields)
for field, val in cls.radio_fields.items():
f = get_field(cls, model, opts, 'radio_fields', field)
if not (isinstance(f, models.ForeignKey) or f.choices):
raise ImproperlyConfigured("'%s.radio_fields['%s']' "
"is neither an instance of ForeignKey nor does "
"have choices set." % (cls.__name__, field))
            if val not in (HORIZONTAL, VERTICAL):
raise ImproperlyConfigured("'%s.radio_fields['%s']' "
"is neither admin.HORIZONTAL nor admin.VERTICAL."
% (cls.__name__, field))
# prepopulated_fields
if hasattr(cls, 'prepopulated_fields'):
check_isdict(cls, 'prepopulated_fields', cls.prepopulated_fields)
for field, val in cls.prepopulated_fields.items():
f = get_field(cls, model, opts, 'prepopulated_fields', field)
if isinstance(f, (models.DateTimeField, models.ForeignKey,
models.ManyToManyField)):
raise ImproperlyConfigured("'%s.prepopulated_fields['%s']' "
"is either a DateTimeField, ForeignKey or "
"ManyToManyField. This isn't allowed."
% (cls.__name__, field))
check_isseq(cls, "prepopulated_fields['%s']" % field, val)
for idx, f in enumerate(val):
get_field(cls, model, opts, "prepopulated_fields['%s'][%d]" % (field, idx), f)
def check_isseq(cls, label, obj):
if not isinstance(obj, (list, tuple)):
raise ImproperlyConfigured("'%s.%s' must be a list or tuple." % (cls.__name__, label))
def check_isdict(cls, label, obj):
if not isinstance(obj, dict):
raise ImproperlyConfigured("'%s.%s' must be a dictionary." % (cls.__name__, label))
def get_field(cls, model, opts, label, field):
try:
return opts.get_field(field)
except models.FieldDoesNotExist:
raise ImproperlyConfigured("'%s.%s' refers to field '%s' that is missing from model '%s.%s'."
% (cls.__name__, label, field, model._meta.app_label, model.__name__))
def check_formfield(cls, model, opts, label, field):
if getattr(cls.form, 'base_fields', None):
try:
cls.form.base_fields[field]
except KeyError:
raise ImproperlyConfigured("'%s.%s' refers to field '%s' that "
"is missing from the form." % (cls.__name__, label, field))
else:
get_form_is_overridden = hasattr(cls, 'get_form') and cls.get_form != ModelAdmin.get_form
if not get_form_is_overridden:
fields = fields_for_model(model)
try:
fields[field]
except KeyError:
raise ImproperlyConfigured("'%s.%s' refers to field '%s' that "
"is missing from the form." % (cls.__name__, label, field))
def fetch_attr(cls, model, opts, label, field):
try:
return opts.get_field(field)
except models.FieldDoesNotExist:
pass
try:
return getattr(model, field)
except AttributeError:
raise ImproperlyConfigured("'%s.%s' refers to '%s' that is neither a field, method or property of model '%s.%s'."
% (cls.__name__, label, field, model._meta.app_label, model.__name__))
def check_readonly_fields(cls, model, opts):
check_isseq(cls, "readonly_fields", cls.readonly_fields)
for idx, field in enumerate(cls.readonly_fields):
if not callable(field):
if not hasattr(cls, field):
if not hasattr(model, field):
try:
opts.get_field(field)
except models.FieldDoesNotExist:
raise ImproperlyConfigured("%s.readonly_fields[%d], %r is not a callable or an attribute of %r or found in the model %r."
% (cls.__name__, idx, field, cls.__name__, model._meta.object_name))
| agpl-3.0 |
recklessromeo/otm-core | opentreemap/importer/tasks.py | 2 | 6486 | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import json
from celery import task, chord
from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction
from treemap.models import Species
from importer.models.base import GenericImportEvent, GenericImportRow
from importer.models.species import SpeciesImportEvent, SpeciesImportRow
from importer.models.trees import TreeImportEvent, TreeImportRow
from importer import errors, fields
from importer.util import (clean_row_data, clean_field_name,
utf8_file_to_csv_dictreader)
BLOCK_SIZE = 250
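# Rows are created, validated and committed in blocks of this size, so a
# 1000-row file, for instance, fans out into four parallel validation
# subtasks (illustrative arithmetic).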
def _create_rows_for_event(ie, csv_file):
# Don't use a transaction for this possibly long-running operation
# so we can show progress. Caller does manual cleanup if necessary.
reader = utf8_file_to_csv_dictreader(csv_file)
field_names = reader.fieldnames
ie.field_order = json.dumps(field_names)
ie.save()
field_names = [clean_field_name(f) for f in field_names]
file_valid = ie.validate_field_names(field_names)
if file_valid:
_create_rows(ie, reader)
if ie.row_count == 0:
file_valid = False
ie.append_error(errors.EMPTY_FILE)
if file_valid:
return True
else:
ie.status = ie.FAILED_FILE_VERIFICATION
ie.save()
return False
def _create_rows(ie, reader):
RowModel = get_import_row_model(ie.import_type)
rows = []
idx = 0
for row in reader:
data = json.dumps(clean_row_data(row))
rows.append(RowModel(data=data, import_event=ie, idx=idx))
idx += 1
if int(idx / BLOCK_SIZE) * BLOCK_SIZE == idx:
RowModel.objects.bulk_create(rows)
rows = []
if rows:
RowModel.objects.bulk_create(rows) # create final partial block
@task()
def run_import_event_validation(import_type, import_event_id, file_obj):
ie = _get_import_event(import_type, import_event_id)
try:
ie.status = GenericImportEvent.LOADING
ie.save()
success = _create_rows_for_event(ie, file_obj)
except Exception as e:
ie.append_error(errors.GENERIC_ERROR, data=[str(e)])
ie.status = GenericImportEvent.FAILED_FILE_VERIFICATION
ie.save()
success = False
if not success:
try:
ie.row_set().delete()
except Exception:
pass
return
ie.status = GenericImportEvent.PREPARING_VERIFICATION
ie.save()
try:
row_set = ie.rows()
validation_tasks = (_validate_rows.subtask(row_set[i:(i+BLOCK_SIZE)])
for i in xrange(0, ie.row_count, BLOCK_SIZE))
final_task = _finalize_validation.si(import_type, import_event_id)
async_result = chord(validation_tasks, final_task).delay()
group_result = async_result.parent
if group_result: # Has value None when run in unit tests
group_result.save()
ie.task_id = group_result.id
ie.status = GenericImportEvent.VERIFIYING
ie.save()
except Exception as e:
ie.status = GenericImportEvent.VERIFICATION_ERROR
ie.save()
try:
ie.append_error(errors.GENERIC_ERROR, data=[str(e)])
ie.save()
ie.row_set().delete()
except Exception:
pass
return
@task()
def _validate_rows(*rows):
for row in rows:
row.validate_row()
@task()
def _finalize_validation(import_type, import_event_id):
ie = _get_import_event(import_type, import_event_id)
ie.task_id = ''
# There shouldn't be any rows left to verify, but it doesn't hurt to check
if _get_waiting_row_count(ie) == 0:
ie.status = GenericImportEvent.FINISHED_VERIFICATION
ie.save()
@task()
def commit_import_event(import_type, import_event_id):
ie = _get_import_event(import_type, import_event_id)
commit_tasks = [_commit_rows.s(import_type, import_event_id, i)
for i in xrange(0, ie.row_count, BLOCK_SIZE)]
finalize_task = _finalize_commit.si(import_type, import_event_id)
chord(commit_tasks, finalize_task).delay()
@task()
@transaction.atomic
def _commit_rows(import_type, import_event_id, i):
ie = _get_import_event(import_type, import_event_id)
for row in ie.rows()[i:(i + BLOCK_SIZE)]:
row.commit_row()
@task()
def _finalize_commit(import_type, import_event_id):
ie = _get_import_event(import_type, import_event_id)
ie.status = GenericImportEvent.FINISHED_CREATING
ie.save()
if import_type == TreeImportEvent.import_type:
ie.instance.update_geo_rev()
def _get_import_event(import_type, import_event_id):
Model = get_import_event_model(import_type)
try:
return Model.objects.get(pk=import_event_id)
except ObjectDoesNotExist:
raise Exception('Import event not found "%s" %s'
% (import_type, import_event_id))
def get_import_event_model(import_type):
if import_type == SpeciesImportEvent.import_type:
Model = SpeciesImportEvent
elif import_type == TreeImportEvent.import_type:
Model = TreeImportEvent
else:
raise Exception('Invalid import type "%s"' % import_type)
return Model
def get_import_row_model(import_type):
if import_type == SpeciesImportEvent.import_type:
Model = SpeciesImportRow
elif import_type == TreeImportEvent.import_type:
Model = TreeImportRow
else:
raise Exception('Invalid import type "%s"' % import_type)
return Model
def _get_waiting_row_count(ie):
return ie.rows()\
.filter(status=GenericImportRow.WAITING)\
.count()
def _species_export_builder(model):
model_dict = model.as_dict()
obj = {}
for k, v in SpeciesImportRow.SPECIES_MAP.iteritems():
if v in fields.species.ALL:
if k in model_dict:
val = model_dict[k]
                if val is not None:
obj[v] = val
return obj
@task
def get_all_species_export(instance_id):
return [_species_export_builder(species) for species
in Species.objects.filter(instance_id=instance_id)]
@task
def get_import_export(import_type, import_event_id):
ie = _get_import_event(import_type, import_event_id)
return [clean_row_data(json.loads(row.data)) for row in ie.rows()]
| agpl-3.0 |
tensorflow/estimator | tensorflow_estimator/python/estimator/canned/optimizers_test.py | 1 | 3665 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for optimizers.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_estimator.python.estimator.canned import optimizers
class _TestOptimizer(tf.compat.v1.train.Optimizer):
def __init__(self):
super(_TestOptimizer, self).__init__(
use_locking=False, name='TestOptimizer')
class GetOptimizerInstance(tf.test.TestCase):
def test_unsupported_name(self):
with self.assertRaisesRegexp(
ValueError, 'Unsupported optimizer name: unsupported_name'):
optimizers.get_optimizer_instance('unsupported_name', learning_rate=0.1)
def test_supported_name_but_learning_rate_none(self):
with self.assertRaisesRegexp(
ValueError, 'learning_rate must be specified when opt is string'):
optimizers.get_optimizer_instance('Adagrad', learning_rate=None)
def test_adagrad(self):
opt = optimizers.get_optimizer_instance('Adagrad', learning_rate=0.1)
self.assertIsInstance(opt, tf.compat.v1.train.AdagradOptimizer)
self.assertAlmostEqual(0.1, opt._learning_rate)
def test_adam(self):
opt = optimizers.get_optimizer_instance('Adam', learning_rate=0.1)
self.assertIsInstance(opt, tf.compat.v1.train.AdamOptimizer)
self.assertAlmostEqual(0.1, opt._lr)
def test_ftrl(self):
opt = optimizers.get_optimizer_instance('Ftrl', learning_rate=0.1)
self.assertIsInstance(opt, tf.compat.v1.train.FtrlOptimizer)
self.assertAlmostEqual(0.1, opt._learning_rate)
def test_rmsprop(self):
opt = optimizers.get_optimizer_instance('RMSProp', learning_rate=0.1)
self.assertIsInstance(opt, tf.compat.v1.train.RMSPropOptimizer)
self.assertAlmostEqual(0.1, opt._learning_rate)
def test_sgd(self):
opt = optimizers.get_optimizer_instance('SGD', learning_rate=0.1)
self.assertIsInstance(opt, tf.compat.v1.train.GradientDescentOptimizer)
self.assertAlmostEqual(0.1, opt._learning_rate)
def test_object(self):
opt = optimizers.get_optimizer_instance(_TestOptimizer())
self.assertIsInstance(opt, _TestOptimizer)
def test_object_invalid(self):
with self.assertRaisesRegexp(
ValueError, 'The given object is not an Optimizer instance'):
optimizers.get_optimizer_instance((1, 2, 3))
def test_callable(self):
def _optimizer_fn():
return _TestOptimizer()
opt = optimizers.get_optimizer_instance(_optimizer_fn)
self.assertIsInstance(opt, _TestOptimizer)
def test_lambda(self):
opt = optimizers.get_optimizer_instance(lambda: _TestOptimizer()) # pylint: disable=unnecessary-lambda
self.assertIsInstance(opt, _TestOptimizer)
def test_callable_returns_invalid(self):
def _optimizer_fn():
return (1, 2, 3)
with self.assertRaisesRegexp(
ValueError, 'The given object is not an Optimizer instance'):
optimizers.get_optimizer_instance(_optimizer_fn)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
AuyaJackie/odoo | addons/stock_account/stock_account.py | 77 | 19901 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp import SUPERUSER_ID, api
import logging
_logger = logging.getLogger(__name__)
class stock_inventory(osv.osv):
_inherit = "stock.inventory"
_columns = {
'period_id': fields.many2one('account.period', 'Force Valuation Period', help="Choose the accounting period where you want to value the stock moves created by the inventory instead of the default one (chosen by the inventory end date)"),
}
def post_inventory(self, cr, uid, inv, context=None):
if context is None:
context = {}
ctx = context.copy()
if inv.period_id:
ctx['force_period'] = inv.period_id.id
return super(stock_inventory, self).post_inventory(cr, uid, inv, context=ctx)
#----------------------------------------------------------
# Stock Location
#----------------------------------------------------------
class stock_location(osv.osv):
_inherit = "stock.location"
_columns = {
'valuation_in_account_id': fields.many2one('account.account', 'Stock Valuation Account (Incoming)', domain=[('type', '=', 'other')],
help="Used for real-time inventory valuation. When set on a virtual location (non internal type), "
"this account will be used to hold the value of products being moved from an internal location "
"into this location, instead of the generic Stock Output Account set on the product. "
"This has no effect for internal locations."),
'valuation_out_account_id': fields.many2one('account.account', 'Stock Valuation Account (Outgoing)', domain=[('type', '=', 'other')],
help="Used for real-time inventory valuation. When set on a virtual location (non internal type), "
"this account will be used to hold the value of products being moved out of this location "
"and into an internal location, instead of the generic Stock Output Account set on the product. "
"This has no effect for internal locations."),
}
#----------------------------------------------------------
# Quants
#----------------------------------------------------------
class stock_quant(osv.osv):
_inherit = "stock.quant"
def _get_inventory_value(self, cr, uid, quant, context=None):
if quant.product_id.cost_method in ('real'):
return quant.cost * quant.qty
return super(stock_quant, self)._get_inventory_value(cr, uid, quant, context=context)
@api.cr_uid_ids_context
def _price_update(self, cr, uid, quant_ids, newprice, context=None):
        ''' This function is called at the end of negative quant reconciliation and makes the accounting entry adjustments and updates the product cost price if needed
'''
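        # For example (illustrative figures): a negative quant booked at a
        # cost of 5.0 that is finally reconciled at 7.0 gives a
        # valuation_update of 2.0 per unit, posted as an adjustment below.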
if context is None:
context = {}
account_period = self.pool['account.period']
super(stock_quant, self)._price_update(cr, uid, quant_ids, newprice, context=context)
for quant in self.browse(cr, uid, quant_ids, context=context):
move = self._get_latest_move(cr, uid, quant, context=context)
valuation_update = newprice - quant.cost
# this is where we post accounting entries for adjustment, if needed
if not quant.company_id.currency_id.is_zero(valuation_update):
# adjustment journal entry needed, cost has been updated
period_id = (context.get('force_period') or
account_period.find(cr, uid, move.date, context=context)[0])
period = account_period.browse(cr, uid, period_id, context=context)
# If neg quant period already closed (likely with manual valuation), skip update
if period.state != 'done':
ctx = dict(context, force_valuation_amount=valuation_update)
self._account_entry_move(cr, uid, [quant], move, context=ctx)
#update the standard price of the product, only if we would have done it if we'd have had enough stock at first, which means
#1) the product cost's method is 'real'
#2) we just fixed a negative quant caused by an outgoing shipment
if quant.product_id.cost_method == 'real' and quant.location_id.usage != 'internal':
self.pool.get('stock.move')._store_average_cost_price(cr, uid, move, context=context)
def _account_entry_move(self, cr, uid, quants, move, context=None):
"""
Accounting Valuation Entries
        quants: browse record list of Quants to create accounting valuation entries for. Must be non-empty, and all quants are supposed to have the same location id (they have already moved in)
move: Move to use. browse record
"""
if context is None:
context = {}
location_obj = self.pool.get('stock.location')
location_from = move.location_id
location_to = quants[0].location_id
company_from = location_obj._location_owner(cr, uid, location_from, context=context)
company_to = location_obj._location_owner(cr, uid, location_to, context=context)
if move.product_id.valuation != 'real_time':
return False
for q in quants:
if q.owner_id:
#if the quant isn't owned by the company, we don't make any valuation entry
return False
if q.qty <= 0:
#we don't make any stock valuation for negative quants because the valuation is already made for the counterpart.
#At that time the valuation will be made at the product cost price and afterward there will be new accounting entries
#to make the adjustments when we know the real cost price.
return False
        #in case of routes making the link between several warehouses of the same company, the transit location belongs to this company, so we don't need to create accounting entries
# Create Journal Entry for products arriving in the company
if company_to and (move.location_id.usage not in ('internal', 'transit') and move.location_dest_id.usage == 'internal' or company_from != company_to):
ctx = context.copy()
ctx['force_company'] = company_to.id
journal_id, acc_src, acc_dest, acc_valuation = self._get_accounting_data_for_valuation(cr, uid, move, context=ctx)
if location_from and location_from.usage == 'customer':
#goods returned from customer
self._create_account_move_line(cr, uid, quants, move, acc_dest, acc_valuation, journal_id, context=ctx)
else:
self._create_account_move_line(cr, uid, quants, move, acc_src, acc_valuation, journal_id, context=ctx)
# Create Journal Entry for products leaving the company
if company_from and (move.location_id.usage == 'internal' and move.location_dest_id.usage not in ('internal', 'transit') or company_from != company_to):
ctx = context.copy()
ctx['force_company'] = company_from.id
journal_id, acc_src, acc_dest, acc_valuation = self._get_accounting_data_for_valuation(cr, uid, move, context=ctx)
if location_to and location_to.usage == 'supplier':
#goods returned to supplier
self._create_account_move_line(cr, uid, quants, move, acc_valuation, acc_src, journal_id, context=ctx)
else:
self._create_account_move_line(cr, uid, quants, move, acc_valuation, acc_dest, journal_id, context=ctx)
def _quant_create(self, cr, uid, qty, move, lot_id=False, owner_id=False, src_package_id=False, dest_package_id=False, force_location_from=False, force_location_to=False, context=None):
quant = super(stock_quant, self)._quant_create(cr, uid, qty, move, lot_id=lot_id, owner_id=owner_id, src_package_id=src_package_id, dest_package_id=dest_package_id, force_location_from=force_location_from, force_location_to=force_location_to, context=context)
if move.product_id.valuation == 'real_time':
self._account_entry_move(cr, uid, [quant], move, context)
return quant
def move_quants_write(self, cr, uid, quants, move, location_dest_id, dest_package_id, context=None):
res = super(stock_quant, self).move_quants_write(cr, uid, quants, move, location_dest_id, dest_package_id, context=context)
if move.product_id.valuation == 'real_time':
self._account_entry_move(cr, uid, quants, move, context=context)
return res
def _get_accounting_data_for_valuation(self, cr, uid, move, context=None):
"""
Return the accounts and journal to use to post Journal Entries for the real-time
valuation of the quant.
:param context: context dictionary that can explicitly mention the company to consider via the 'force_company' key
:returns: journal_id, source account, destination account, valuation account
:raise: osv.except_osv() is any mandatory account or journal is not defined.
"""
product_obj = self.pool.get('product.template')
accounts = product_obj.get_product_accounts(cr, uid, move.product_id.product_tmpl_id.id, context)
if move.location_id.valuation_out_account_id:
acc_src = move.location_id.valuation_out_account_id.id
else:
acc_src = accounts['stock_account_input']
if move.location_dest_id.valuation_in_account_id:
acc_dest = move.location_dest_id.valuation_in_account_id.id
else:
acc_dest = accounts['stock_account_output']
acc_valuation = accounts.get('property_stock_valuation_account_id', False)
journal_id = accounts['stock_journal']
return journal_id, acc_src, acc_dest, acc_valuation
def _prepare_account_move_line(self, cr, uid, move, qty, cost, credit_account_id, debit_account_id, context=None):
"""
Generate the account.move.line values to post to track the stock valuation difference due to the
processing of the given quant.
"""
if context is None:
context = {}
currency_obj = self.pool.get('res.currency')
if context.get('force_valuation_amount'):
valuation_amount = context.get('force_valuation_amount')
else:
if move.product_id.cost_method == 'average':
valuation_amount = cost if move.location_id.usage != 'internal' and move.location_dest_id.usage == 'internal' else move.product_id.standard_price
else:
valuation_amount = cost if move.product_id.cost_method == 'real' else move.product_id.standard_price
#the standard_price of the product may be in another decimal precision, or not compatible with the coinage of
#the company currency... so we need to use round() before creating the accounting entries.
valuation_amount = currency_obj.round(cr, uid, move.company_id.currency_id, valuation_amount * qty)
partner_id = (move.picking_id.partner_id and self.pool.get('res.partner')._find_accounting_partner(move.picking_id.partner_id).id) or False
debit_line_vals = {
'name': move.name,
'product_id': move.product_id.id,
'quantity': qty,
'product_uom_id': move.product_id.uom_id.id,
'ref': move.picking_id and move.picking_id.name or False,
'date': move.date,
'partner_id': partner_id,
'debit': valuation_amount > 0 and valuation_amount or 0,
'credit': valuation_amount < 0 and -valuation_amount or 0,
'account_id': debit_account_id,
}
credit_line_vals = {
'name': move.name,
'product_id': move.product_id.id,
'quantity': qty,
'product_uom_id': move.product_id.uom_id.id,
'ref': move.picking_id and move.picking_id.name or False,
'date': move.date,
'partner_id': partner_id,
'credit': valuation_amount > 0 and valuation_amount or 0,
'debit': valuation_amount < 0 and -valuation_amount or 0,
'account_id': credit_account_id,
}
return [(0, 0, debit_line_vals), (0, 0, credit_line_vals)]
def _create_account_move_line(self, cr, uid, quants, move, credit_account_id, debit_account_id, journal_id, context=None):
#group quants by cost
quant_cost_qty = {}
for quant in quants:
if quant_cost_qty.get(quant.cost):
quant_cost_qty[quant.cost] += quant.qty
else:
quant_cost_qty[quant.cost] = quant.qty
move_obj = self.pool.get('account.move')
for cost, qty in quant_cost_qty.items():
move_lines = self._prepare_account_move_line(cr, uid, move, qty, cost, credit_account_id, debit_account_id, context=context)
period_id = context.get('force_period', self.pool.get('account.period').find(cr, uid, context=context)[0])
move_obj.create(cr, uid, {'journal_id': journal_id,
'line_id': move_lines,
'period_id': period_id,
'date': fields.date.context_today(self, cr, uid, context=context),
'ref': move.picking_id.name}, context=context)
#def _reconcile_single_negative_quant(self, cr, uid, to_solve_quant, quant, quant_neg, qty, context=None):
# move = self._get_latest_move(cr, uid, to_solve_quant, context=context)
# quant_neg_position = quant_neg.negative_dest_location_id.usage
# remaining_solving_quant, remaining_to_solve_quant = super(stock_quant, self)._reconcile_single_negative_quant(cr, uid, to_solve_quant, quant, quant_neg, qty, context=context)
# #update the standard price of the product, only if we would have done it if we'd have had enough stock at first, which means
# #1) there isn't any negative quant anymore
# #2) the product cost's method is 'real'
# #3) we just fixed a negative quant caused by an outgoing shipment
# if not remaining_to_solve_quant and move.product_id.cost_method == 'real' and quant_neg_position != 'internal':
# self.pool.get('stock.move')._store_average_cost_price(cr, uid, move, context=context)
# return remaining_solving_quant, remaining_to_solve_quant
class stock_move(osv.osv):
_inherit = "stock.move"
def action_done(self, cr, uid, ids, context=None):
self.product_price_update_before_done(cr, uid, ids, context=context)
res = super(stock_move, self).action_done(cr, uid, ids, context=context)
self.product_price_update_after_done(cr, uid, ids, context=context)
return res
def _store_average_cost_price(self, cr, uid, move, context=None):
        ''' move is a browse record '''
product_obj = self.pool.get('product.product')
if any([q.qty <= 0 for q in move.quant_ids]):
#if there is a negative quant, the standard price shouldn't be updated
return
        #Note: here we can't store a quant.cost directly as we may have moved out 2 units (1 unit at 5€ and 1 unit at 7€) and in case of a product return of 1 unit, we can't know which of the 2 costs has to be used (5€ or 7€?). So at that time, thanks to the average valuation price we are storing, we will value it at 6€
average_valuation_price = 0.0
for q in move.quant_ids:
average_valuation_price += q.qty * q.cost
average_valuation_price = average_valuation_price / move.product_qty
# Write the standard price, as SUPERUSER_ID because a warehouse manager may not have the right to write on products
ctx = dict(context or {}, force_company=move.company_id.id)
product_obj.write(cr, SUPERUSER_ID, [move.product_id.id], {'standard_price': average_valuation_price}, context=ctx)
self.write(cr, uid, [move.id], {'price_unit': average_valuation_price}, context=context)
def product_price_update_before_done(self, cr, uid, ids, context=None):
product_obj = self.pool.get('product.product')
tmpl_dict = {}
for move in self.browse(cr, uid, ids, context=context):
            #adapt standard price on incoming moves if the product cost_method is 'average'
if (move.location_id.usage == 'supplier') and (move.product_id.cost_method == 'average'):
product = move.product_id
prod_tmpl_id = move.product_id.product_tmpl_id.id
qty_available = move.product_id.product_tmpl_id.qty_available
if tmpl_dict.get(prod_tmpl_id):
product_avail = qty_available + tmpl_dict[prod_tmpl_id]
else:
tmpl_dict[prod_tmpl_id] = 0
product_avail = qty_available
if product_avail <= 0:
new_std_price = move.price_unit
else:
# Get the standard price
amount_unit = product.standard_price
new_std_price = ((amount_unit * product_avail) + (move.price_unit * move.product_qty)) / (product_avail + move.product_qty)
tmpl_dict[prod_tmpl_id] += move.product_qty
# Write the standard price, as SUPERUSER_ID because a warehouse manager may not have the right to write on products
ctx = dict(context or {}, force_company=move.company_id.id)
product_obj.write(cr, SUPERUSER_ID, [product.id], {'standard_price': new_std_price}, context=ctx)
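    # Worked example for the moving-average update above (illustrative numbers):
    # with 10 units on hand at a standard price of 2.0 and an incoming move of
    # 5 units priced at 3.5, the new standard price is
    # (10 * 2.0 + 5 * 3.5) / (10 + 5) = 37.5 / 15 = 2.5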
def product_price_update_after_done(self, cr, uid, ids, context=None):
'''
This method adapts the price on the product when necessary
'''
for move in self.browse(cr, uid, ids, context=context):
#adapt standard price on outgoing moves if the product cost_method is 'real', so that a return
#or an inventory loss is made using the last value used for an outgoing valuation.
if move.product_id.cost_method == 'real' and move.location_dest_id.usage != 'internal':
#store the average price of the move on the move and product form
self._store_average_cost_price(cr, uid, move, context=context)
| agpl-3.0 |
ajrichards/htsint | docs/conf.py | 1 | 8728 | # -*- coding: utf-8 -*-
# This file was auto-generated using lpEdit
import sys, os, time, ast
from lpedit import SphinxLogger
from htsint import __version__
currentFilePath = os.path.realpath(__file__)
logFilePath = os.path.join(os.path.split(currentFilePath)[0],'sphinx.log')
sl = SphinxLogger(logFilePath)
## these variables can be overwritten here or edited using lpEdit
projectName = "htsint - %s"%__version__
authors = sl.log['authors']
extensions = ['sphinx.ext.pngmath',
'sphinx.ext.doctest',
'sphinx.ext.graphviz',
'sphinx.ext.todo',
'sphinx.ext.viewcode']
version = sl.log['version']
release = sl.log['release']
htmlTitle = "htsint - %s"%__version__
htmlTheme = "agogo"
htmlShowCopyright = ast.literal_eval(sl.log['html_show_copyright'])
paperSize = sl.log['latex_papersize']
pointSize = sl.log['latex_pointsize']
preamble = sl.log['latex_preamble']
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
## see from imported
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'%s'%projectName
copyright = u'%s, %s'%(time.localtime().tm_year,'A. Richards')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
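# NOTE: the literal '1.0' values above override the version/release values that
# were read from sphinx.log near the top of this file.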
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = htmlTheme # default,nature,haiku,agogo
##3333CC
html_theme_options = {
"headerbg": "#001A57",
"bodyfont": 'sans-serif'
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = htmlTitle
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = os.path.join("images","logo.png")
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = htmlShowCopyright
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc'%projectName
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': paperSize,
# The font size ('10pt', '11pt' or '12pt').
'pointsize': pointSize,
# Additional stuff for the LaTeX preamble.
'preamble': preamble,
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', '%s.tex'%projectName, u'%s'%projectName,
u'%s'%authors, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
#man_pages = [
# ('index', 'basicpythonpy', u'BasicPython.py Documentation',
# [u'Bilbo'], 1)
#]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
#texinfo_documents = [
# ('index', 'BasicPythonpy', u'BasicPython.py Documentation',
# u'Bilbo', 'BasicPythonpy', 'One line description of project.',
# 'Miscellaneous'),
#]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| bsd-3-clause |
hks-epod/configuration | util/jenkins/check-celery-queues.py | 4 | 6892 | import redis
import click
import boto3
import botocore
import backoff
from itertools import zip_longest
max_tries = 5
class RedisWrapper(object):
def __init__(self, *args, **kwargs):
self.redis = redis.StrictRedis(*args, **kwargs)
@backoff.on_exception(backoff.expo,
(redis.exceptions.TimeoutError,
redis.exceptions.ConnectionError),
max_tries=max_tries)
def keys(self):
return self.redis.keys()
@backoff.on_exception(backoff.expo,
(redis.exceptions.TimeoutError,
redis.exceptions.ConnectionError),
max_tries=max_tries)
def type(self, key):
return self.redis.type(key)
@backoff.on_exception(backoff.expo,
(redis.exceptions.TimeoutError,
redis.exceptions.ConnectionError),
max_tries=max_tries)
def llen(self, key):
return self.redis.llen(key)
class CwBotoWrapper(object):
def __init__(self):
self.client = boto3.client('cloudwatch')
@backoff.on_exception(backoff.expo,
(botocore.exceptions.ClientError),
max_tries=max_tries)
def list_metrics(self, *args, **kwargs):
return self.client.list_metrics(*args, **kwargs)
@backoff.on_exception(backoff.expo,
(botocore.exceptions.ClientError),
max_tries=max_tries)
def put_metric_data(self, *args, **kwargs):
return self.client.put_metric_data(*args, **kwargs)
@backoff.on_exception(backoff.expo,
(botocore.exceptions.ClientError),
max_tries=max_tries)
def describe_alarms_for_metric(self, *args, **kwargs):
return self.client.describe_alarms_for_metric(*args, **kwargs)
@backoff.on_exception(backoff.expo,
(botocore.exceptions.ClientError),
max_tries=max_tries)
def put_metric_alarm(self, *args, **kwargs):
return self.client.put_metric_alarm(*args, **kwargs)
@click.command()
@click.option('--host', '-h', default='localhost',
help='Hostname of redis server')
@click.option('--port', '-p', default=6379, help='Port of redis server')
@click.option('--environment', '-e', required=True)
@click.option('--deploy', '-d', required=True,
help="Deployment (i.e. edx or edge)")
@click.option('--max-metrics', default=20,
help='Maximum number of CloudWatch metrics to publish')
@click.option('--threshold', default=50,
help='Default maximum queue length before alarm notification is'
+ ' sent')
@click.option('--queue-threshold', type=(str, int), multiple=True,
help='Threshold per queue in format --queue-threshold'
+ ' {queue_name} {threshold}. May be used multiple times')
@click.option('--sns-arn', '-s', help='ARN for SNS alert topic', required=True)
def check_queues(host, port, environment, deploy, max_metrics, threshold,
queue_threshold, sns_arn):
thresholds = dict(queue_threshold)
timeout = 1
namespace = "celery/{}-{}".format(environment, deploy)
redis_client = RedisWrapper(host=host, port=port, socket_timeout=timeout,
socket_connect_timeout=timeout)
cloudwatch = CwBotoWrapper()
metric_name = 'queue_length'
dimension = 'queue'
response = cloudwatch.list_metrics(Namespace=namespace,
MetricName=metric_name,
Dimensions=[{'Name': dimension}])
existing_queues = []
for m in response["Metrics"]:
existing_queues.extend(
[d['Value'] for d in m["Dimensions"] if d['Name'] == dimension])
redis_queues = set([k.decode() for k in redis_client.keys()
if redis_client.type(k) == b'list'])
all_queues = existing_queues + list(
set(redis_queues).difference(existing_queues)
)
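    # Keep every queue CloudWatch already knows about (so existing metrics keep
    # receiving datapoints even when a queue is currently absent from redis),
    # then append any redis-only queues CloudWatch has not seen yet.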
for queues in grouper(all_queues, max_metrics):
# grouper can return a bunch of Nones and we want to skip those
queues = [q for q in queues if q is not None]
metric_data = []
for queue in queues:
metric_data.append({
'MetricName': metric_name,
'Dimensions': [{
"Name": dimension,
"Value": queue
}],
'Value': redis_client.llen(queue)
})
if len(metric_data) > 0:
cloudwatch.put_metric_data(Namespace=namespace, MetricData=metric_data)
for queue in queues:
dimensions = [{'Name': dimension, 'Value': queue}]
queue_threshold = threshold
if queue in thresholds:
queue_threshold = thresholds[queue]
# Period is in seconds
period = 60
evaluation_periods = 15
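            # With a 60s period and 15 evaluation periods, the queue length must
            # stay above the threshold for roughly 15 minutes before the alarm fires.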
comparison_operator = "GreaterThanThreshold"
treat_missing_data = "notBreaching"
statistic = "Maximum"
actions = [sns_arn]
alarm_name = "{}-{} {} queue length over threshold".format(environment,
deploy,
queue)
print('Creating or updating alarm "{}"'.format(alarm_name))
cloudwatch.put_metric_alarm(AlarmName=alarm_name,
AlarmDescription=alarm_name,
Namespace=namespace,
MetricName=metric_name,
Dimensions=dimensions,
Period=period,
EvaluationPeriods=evaluation_periods,
TreatMissingData=treat_missing_data,
Threshold=queue_threshold,
ComparisonOperator=comparison_operator,
Statistic=statistic,
InsufficientDataActions=actions,
OKActions=actions,
AlarmActions=actions)
# Stolen right from the itertools recipes
# https://docs.python.org/3/library/itertools.html#itertools-recipes
def grouper(iterable, n, fillvalue=None):
"Collect data into fixed-length chunks or blocks"
    # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
if __name__ == '__main__':
check_queues()
| agpl-3.0 |
trulywireless/pjsip | pjsip-apps/build/get-footprint.py | 59 | 12629 | # $Id: get-footprint.py 1352 2007-06-08 01:41:25Z bennylp $
#
# This file is used to generate PJSIP/PJMEDIA footprint report.
# To use this file, just run it in pjsip-apps/build directory, to
# produce footprint.txt and footprint.htm report files.
#
import os
import sys
import string
import time
compile_flags1 = [
# Base
['BASE', 'Empty application size'],
['', 'Subtotal: Empty application size'],
['HAS_PJLIB', 'Minimum PJLIB only'],
# Subtotal
['', 'Subtotal'],
# PJLIB-UTIL
['HAS_PJLIB_STUN', 'STUN client'],
['HAS_PJLIB_GETOPT', 'getopt() functionality'],
# Subtotal
['', 'TOTAL']
]
compile_flags = [
# Base
['BASE', 'Empty application size'],
['', 'Subtotal: empty application size on this platform'],
['HAS_PJLIB', 'PJLIB (pool, data structures, hash tables, ioqueue, socket, timer heap, etc.). ' +
'For targets that statically link application with LIBC, the size includes ' +
'various LIBC functions that are used by PJLIB.'],
['', 'Subtotal: Application linked with PJLIB'],
# PJLIB-UTIL
['HAS_PJLIB_STUN', 'PJLIB-UTIL STUN client'],
['HAS_PJLIB_GETOPT', 'PJLIB-UTIL getopt() functionality'],
['HAS_PJLIB_SCANNER', 'PJLIB-UTIL text scanner (needed by SIP parser)'],
['HAS_PJLIB_XML', 'PJLIB-UTIL tiny XML (parsing and API) (needs text scanner)'],
['HAS_PJLIB_DNS', 'PJLIB-UTIL DNS packet and parsing'],
['HAS_PJLIB_RESOLVER', 'PJLIB-UTIL Asynchronous DNS resolver/caching engine'],
['HAS_PJLIB_CRC32', 'PJLIB-UTIL CRC32 algorithm'],
['HAS_PJLIB_HMAC_MD5', 'PJLIB-UTIL HMAC-MD5 algorithm'],
['HAS_PJLIB_HMAC_SHA1', 'PJLIB-UTIL HMAC-SHA1 algorithm'],
# PJSIP
['HAS_PJSIP_CORE_MSG_ELEM', 'PJSIP Core - Messaging Elements and Parsing (message, headers, SIP URI, TEL URI/RFC 3966, etc.)'],
['HAS_PJSIP_CORE', 'PJSIP Core - Endpoint (transport management, module management, event distribution, etc.)'],
['HAS_PJSIP_CORE_MSG_UTIL', 'PJSIP Core - Stateless operations, SIP SRV, server resolution and fail-over'],
['HAS_PJSIP_UDP_TRANSPORT', 'PJSIP UDP transport'],
['', 'Subtotal: A minimalistic SIP application (parsing, UDP transport+STUN, no transaction)'],
['HAS_PJSIP_TCP_TRANSPORT', 'PJSIP TCP transport'],
['HAS_PJSIP_TLS_TRANSPORT', 'PJSIP TLS transport'],
['HAS_PJSIP_INFO', 'PJSIP INFO support (RFC 2976) (no special treatment, thus the zero size)'],
['HAS_PJSIP_TRANSACTION', 'PJSIP transaction and stateful API'],
['HAS_PJSIP_AUTH_CLIENT', 'PJSIP digest authentication client'],
['HAS_PJSIP_UA_LAYER', 'PJSIP User agent layer and base dialog and usage management (draft-ietf-sipping-dialogusage-01)'],
['HAS_PJMEDIA_SDP', 'PJMEDIA SDP Parsing and API (RFC 2327), needed by SDP negotiator'],
['HAS_PJMEDIA_SDP_NEGOTIATOR','PJMEDIA SDP negotiator (RFC 3264), needed by INVITE session'],
['HAS_PJSIP_INV_SESSION', 'PJSIP INVITE session API'],
['HAS_PJSIP_REGC', 'PJSIP client registration API'],
['', 'Subtotal: Minimal SIP application with registration (including digest authentication)'],
['HAS_PJSIP_EVENT_FRAMEWORK','PJSIP Event/SUBSCRIBE framework, RFC 3265 (needed by call transfer, and presence)'],
['HAS_PJSIP_CALL_TRANSFER', 'PJSIP Call Transfer/REFER support (RFC 3515)'],
['', 'Subtotal: Minimal SIP application with call transfer'],
['HAS_PJSIP_PRESENCE', 'PJSIP Presence subscription, including PIDF/X-PIDF support (RFC 3856, RFC 3863, etc) (needs XML)'],
['HAS_PJSIP_MESSAGE', 'PJSIP Instant Messaging/MESSAGE support (RFC 3428) (no special treatment, thus the zero size)'],
['HAS_PJSIP_IS_COMPOSING', 'PJSIP Message Composition indication (RFC 3994)'],
# Subtotal
['', 'Subtotal: Complete PJSIP package (call, registration, presence, IM) +STUN +GETOPT (+PJLIB), no media'],
# PJNATH
['HAS_PJNATH_STUN', 'PJNATH STUN'],
['HAS_PJNATH_ICE', 'PJNATH ICE'],
# PJMEDIA
['HAS_PJMEDIA_EC', 'PJMEDIA accoustic echo cancellation'],
['HAS_PJMEDIA_SND_DEV', 'PJMEDIA sound device backend (platform specific)'],
['HAS_PJMEDIA_SILENCE_DET', 'PJMEDIA Adaptive silence detector'],
['HAS_PJMEDIA', 'PJMEDIA endpoint'],
['HAS_PJMEDIA_PLC', 'PJMEDIA Packet Lost Concealment implementation (needed by G.711, GSM, and sound device port)'],
['HAS_PJMEDIA_SND_PORT', 'PJMEDIA sound device media port'],
['HAS_PJMEDIA_RESAMPLE', 'PJMEDIA resampling algorithm (large filter disabled)'],
['HAS_PJMEDIA_G711_CODEC', 'PJMEDIA G.711 codec (PCMA/PCMU, including PLC) (may have already been linked by other module)'],
['HAS_PJMEDIA_CONFERENCE', 'PJMEDIA conference bridge (needs resampling and silence detector)'],
['HAS_PJMEDIA_MASTER_PORT', 'PJMEDIA master port'],
['HAS_PJMEDIA_RTP', 'PJMEDIA stand-alone RTP'],
['HAS_PJMEDIA_RTCP', 'PJMEDIA stand-alone RTCP and media quality calculation'],
['HAS_PJMEDIA_JBUF', 'PJMEDIA stand-alone adaptive jitter buffer'],
['HAS_PJMEDIA_STREAM', 'PJMEDIA stream for remote media communication (needs RTP, RTCP, and jitter buffer)'],
['HAS_PJMEDIA_TONEGEN', 'PJMEDIA tone generator'],
['HAS_PJMEDIA_UDP_TRANSPORT','PJMEDIA UDP media transport'],
['HAS_PJMEDIA_FILE_PLAYER', 'PJMEDIA WAV file player'],
['HAS_PJMEDIA_FILE_CAPTURE', 'PJMEDIA WAV file writer'],
['HAS_PJMEDIA_MEM_PLAYER', 'PJMEDIA fixed buffer player'],
['HAS_PJMEDIA_MEM_CAPTURE', 'PJMEDIA fixed buffer writer'],
['HAS_PJMEDIA_ICE', 'PJMEDIA ICE transport'],
# Subtotal
['', 'Subtotal: Complete SIP and all PJMEDIA features (G.711 codec only)'],
# Codecs
['HAS_PJMEDIA_GSM_CODEC', 'PJMEDIA GSM codec (including PLC)'],
['HAS_PJMEDIA_SPEEX_CODEC', 'PJMEDIA Speex codec (narrowband, wideband, ultra-wideband)'],
['HAS_PJMEDIA_ILBC_CODEC', 'PJMEDIA iLBC codec'],
# Total
['', 'TOTAL: complete libraries (+all codecs)'],
]
# Executable size report, tuple of:
# <all flags>, <flags added>, <text size>, <data>, <bss>, <description>
exe_size = []
#
# Write the report to text file
#
def print_text_report(filename):
output = open(filename, 'w')
output.write('PJSIP and PJMEDIA footprint report\n')
output.write('Auto-generated by pjsip-apps/build/get-footprint.py\n')
output.write('\n')
# Write Revision info.
f = os.popen('svn info | grep Revision')
output.write(f.readline())
output.write('Date: ')
output.write(time.asctime())
output.write('\n')
output.write('\n')
# Write individual module size
output.write('Footprint (in bytes):\n')
output.write(' .text .data .bss Module Description\n')
output.write('==========================================================\n')
for i in range(1, len(exe_size)):
e = exe_size[i]
prev = exe_size[i-1]
if e[1]<>'':
output.write(' ')
output.write( string.rjust(`string.atoi(e[2]) - string.atoi(prev[2])`, 8) )
output.write( string.rjust(`string.atoi(e[3]) - string.atoi(prev[3])`, 8) )
output.write( string.rjust(`string.atoi(e[4]) - string.atoi(prev[4])`, 8) )
output.write(' ' + e[5] + '\n')
else:
output.write(' ------------------------\n')
output.write(' ')
output.write( string.rjust(e[2], 8) )
output.write( string.rjust(e[3], 8) )
output.write( string.rjust(e[4], 8) )
output.write(' ' + e[5] + '\n')
output.write('\n')
# Done
output.close()
#
# Write the report to HTML file
#
def print_html_report():
# Get Revision info.
f = os.popen('svn info | grep Revision')
revision = f.readline().split()[1]
# Get Machine, OS, and CC name
f = os.popen('make -f Footprint.mak print_name')
names = f.readline().split()
m = names[0]
o = names[1]
cc = names[2]
cc_ver = names[3]
# Open HTML file
filename = 'footprint-' + m + '-' + o + '.htm'
output = open(filename, 'w')
title = 'PJSIP and PJMEDIA footprint report for ' + m + '-' + o + ' target'
output.write('<HTML><HEAD>\n');
output.write(' <TITLE>' + title + '</TITLE>\n')
output.write(' <LINK href="/style/style.css" type="text/css" rel="stylesheet">\n')
output.write('</HEAD>\n');
output.write('<BODY bgcolor="white">\n');
output.write('<!--#include virtual="/header.html" -->')
output.write(' <H1>' + title + '</H1>\n')
output.write('Auto-generated by pjsip-apps/build/get-footprint.py script\n')
output.write('<p>Date: ' + time.asctime() + '<BR>\n')
output.write('Revision: r' + revision + '</p>\n\n')
output.write('<HR>\n')
output.write('\n')
# Info
output.write('<H2>Build Configuration</H2>\n')
# build.mak
output.write('\n<H3>build.mak</H3>\n')
output.write('<tt>\n')
f = open('../../build.mak', 'r')
s = f.readlines()
for l in s:
output.write(l + '<BR>\n')
output.write('</tt>\n')
output.write('<p>Using ' + cc + ' version ' + cc_ver +'</p>\n')
# user.mak
output.write('\n<H3>user.mak</H3>\n')
output.write('<tt>\n')
f = open('../../user.mak', 'r')
s = f.readlines()
for l in s:
output.write(l + '<BR>\n')
output.write('</tt>\n')
# config_site.h
output.write('\n<H3><pj/config.site.h></H3>\n')
output.write('<tt>\n')
f = os.popen('cpp -dM -I../../pjlib/include ../../pjlib/include/pj/config_site.h | grep PJ')
s = f.readlines()
for l in s:
output.write(l + '<BR>\n')
output.write('</tt>\n')
# Write individual module size
output.write('<H2>Footprint Report</H2>\n')
output.write('<p>The table below shows the footprint of individual feature, in bytes.</p>')
output.write('<TABLE border="1" cellpadding="2" cellspacing="0">\n' +
'<TR bgcolor="#e8e8ff">\n' +
' <TD align="center"><strong>.text</strong></TD>\n' +
' <TD align="center"><strong>.data</strong></TD>\n' +
' <TD align="center"><strong>.bss</strong></TD>\n' +
' <TD align="center"><strong>Features/Module Description</strong></TD>\n' +
'</TR>\n')
for i in range(1, len(exe_size)):
e = exe_size[i]
prev = exe_size[i-1]
output.write('<TR>\n')
if e[1]<>'':
output.write( ' <TD align="right">' + `string.atoi(e[2]) - string.atoi(prev[2])` + '</TD>\n')
output.write( ' <TD align="right">' + `string.atoi(e[3]) - string.atoi(prev[3])` + '</TD>\n')
output.write( ' <TD align="right">' + `string.atoi(e[4]) - string.atoi(prev[4])` + '</TD>\n' )
output.write( ' <TD>' + e[5] + '</TD>\n')
else:
empty_size = exe_size[1]
output.write('<TR bgcolor="#e8e8ff">\n')
output.write( ' <TD align="right"> </TD>\n')
output.write( ' <TD align="right"> </TD>\n')
output.write( ' <TD align="right"> </TD>\n')
output.write( ' <TD><strong>' + e[5] + ': .text=' + e[2]+ ', .data=' + e[3] + ', .bss=' + e[4] )
output.write( '\n </strong> <BR>(Size minus empty application size: ' + \
'.text=' + `string.atoi(e[2]) - string.atoi(empty_size[2])` + \
', .data=' + `string.atoi(e[3]) - string.atoi(empty_size[3])` + \
                ', .bss=' + `string.atoi(e[4]) - string.atoi(empty_size[4])` + \
')\n' )
output.write( ' </TD>\n')
output.write('</TR>\n')
output.write('</TABLE>\n')
output.write('<!--#include virtual="/footer.html" -->')
output.write('</BODY>\n')
output.write('</HTML>\n')
# Done
output.close()
#
# Get the size of individual feature
#
def get_size(all_flags, flags, desc):
file = 'footprint.exe'
# Remove file
rc = os.system("make -f Footprint.mak FCFLAGS='" + all_flags + "' clean")
# Make the executable
cmd = "make -f Footprint.mak FCFLAGS='" + all_flags + "' all"
#print cmd
rc = os.system(cmd)
if rc <> 0:
sys.exit(1)
# Run 'size' against the executable
f = os.popen('size ' + file)
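    # Typical 'size' (GNU binutils) output, with illustrative values:
    #    text    data     bss     dec     hex filename
    #  123456    2048    4096  129600   1fa40 footprint.exe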
# Skip header of the 'size' output
f.readline()
# Get the sizes
size = f.readline()
f.close()
# Split into tokens
tokens = size.split()
# Build the size tuple and add to exe_size
elem = all_flags, flags, tokens[0], tokens[1], tokens[2], desc
exe_size.append(elem)
# Remove file
rc = os.system("make -f Footprint.mak FCFLAGS='" + all_flags + "' clean")
# Main
elem = '', '', '0', '0', '0', ''
exe_size.append(elem)
all_flags = ''
for elem in compile_flags:
if elem[0] <> '':
flags = '-D' + elem[0]
all_flags += flags + ' '
get_size(all_flags, elem[0], elem[1])
else:
e = exe_size[len(exe_size)-1]
n = all_flags, '', e[2], e[3], e[4], elem[1]
exe_size.append(n)
#print_text_report('footprint.txt')
print_html_report()
| gpl-2.0 |