repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
cpatrick/comic-django | django/comicsite/migrations/0004_auto__add_field_comicsite_description.py | 1 | 2296 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the 'description' field to ComicSite."""

    def forwards(self, orm):
        """Apply the migration: add the new column to the database."""
        # Adding field 'ComicSite.description'
        db.add_column('comicsite_comicsite', 'description',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        """Revert the migration: drop the column again."""
        # Deleting field 'ComicSite.description'
        db.delete_column('comicsite_comicsite', 'description')

    # Frozen ORM snapshot used by South to build fake model classes while
    # this migration runs; auto-generated, do not edit by hand.
    models = {
        'comicsite.comicsite': {
            'Meta': {'object_name': 'ComicSite'},
            'comment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'short_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
            'skin': ('django.db.models.fields.CharField', [], {'max_length': '225'})
        },
        'comicsite.comicsitemodel': {
            'Meta': {'object_name': 'ComicSiteModel'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'comicsite.page': {
            'ComicSite': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['comicsite.ComicSite']"}),
            'Meta': {'ordering': "['ComicSite', 'order']", 'unique_together': "(('ComicSite', 'title'),)", 'object_name': 'Page'},
            'display_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
            'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'html': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        }
    }

    complete_apps = ['comicsite']
kevinvandervlist/tfa | src/vehicles.py | 1 | 3847 | #!/usr/bin/python
# Vehicle program
# Copyright (C) 2012 Kevin van der Vlist, Rosco Voorrips
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Kevin van der Vlist - kevin@kevinvandervlist.nl
# Rosco Voorrips
""" Vehicles program.
This program can be used to identify the start and the end of a vehicle's trajectory.
"""
import os
import wx
import inspect
from PIL import Image
from collections import deque
from abstractframe import AbstractFrame
from convert import Convert
from imageoperations import ImageOperations
from logger import Logger
from settings import Settings
class Frame(AbstractFrame):
    """Main window for marking vehicle trajectory points on images.

    The user zooms in with the keyboard, clicks the start and end points
    of a vehicle's trajectory, and moves between images; marked locations
    are written out through the logger.
    """

    def __init__(self, parent=None, id=-1, pos=wx.DefaultPosition, title='wxPython', size=None, settings=None, fileList=None):
        # Use a None sentinel instead of a mutable default argument ([]),
        # which would be shared by every call that omits fileList.
        if fileList is None:
            fileList = []
        AbstractFrame.__init__(self, parent, id, pos, title, size, settings, fileList)

    def KeyboardEvent(self, event):
        """Dispatch key presses: zoom toggle, next/previous image, skip."""
        zk = self.settings.GetZoomKey()
        nk = self.settings.GetNextKey()
        pk = self.settings.GetPreviousKey()
        sk = self.settings.GetSkipKey()
        ekc = event.GetKeyCode()
        if ekc == zk:
            # Toggle zoom at the current mouse position.
            if self.zoomed:
                self.zoomed = False
                self.SetPilImage()
            else:
                self.zoomed = True
                self.ZoomAtLocation(self.currentMouseLocation)
        elif ekc == nk:
            # Only log when at least two points (start and end) were marked.
            if len(self.locationList) > 1:
                self.logger.LogVehicle(self.curFileName, self.locationList)
            self.OpenNextImage()
        elif ekc == pk:
            self.OpenPrevImage()
        elif ekc == sk:
            # Skip marking, but indicate it with a different coloured box.
            self.skip = True
        else:
            # Any other key is ignored.
            return

    def MouseEventLeft(self, event):
        """Record a marking at the clicked position (only while zoomed in)."""
        # Only process when zoomed in.
        if self.zoomed:
            self.zoomed = False
            x = event.GetX()
            y = event.GetY()
            # Translate view-port coordinates back onto the original image.
            point = self.imops.GetOriginalCoords(self.curZoomLevel, (x, y), self.curViewPort)
            if self.skip:
                # Skip mode: store a (0, 0) sentinel point and draw the
                # marking in the dedicated skip colour.
                self.skip = False
                self.locationList.append((0, 0))
                self.imops.SetMarking(self.curPilImage, point, self.settings.GetSkipColour(), self.settings.GetMarkingOffset(), self.settings.GetMarkingWidth())
            else:
                self.locationList.append(point)
                self.imops.SetMarking(self.curPilImage, point, self.colour, self.settings.GetMarkingOffset(), self.settings.GetMarkingWidth())
            self.SetPilImage()
class App(wx.App):
    """wx application object: builds settings, file list and the frame."""

    def OnInit(self):
        """wx entry point; return True to signal successful start-up."""
        # Init settings thingy; the settings file is located relative to
        # this script's own path.
        settings = Settings(inspect.getfile(inspect.currentframe()))
        # Default size:
        size = (1400, 800)
        # Prepare a (sorted) list of image files to process.
        directory = settings.GetInDir()
        im = [directory + os.sep + f for f in settings.GetFileList(directory)]
        im.sort()
        # Setup the frame
        self.frame = Frame(size=size, settings=settings, fileList=im)
        # Finally, show the created frame
        self.frame.Show()
        self.SetTopWindow(self.frame)
        return True
if __name__ == '__main__':
    # Application entrypoint: create the wx app and hand control to its
    # event loop (blocks until the window is closed).
    app = App()
    app.MainLoop()
| gpl-3.0 |
lwinters-cs470/Flask-OAuth | flask_oauth.py | 19 | 17752 | # -*- coding: utf-8 -*-
"""
flask_oauth
~~~~~~~~~~~
Implements basic OAuth support for Flask.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import httplib2
from functools import wraps
from urlparse import urljoin
from flask import request, session, json, redirect, Response
from werkzeug import url_decode, url_encode, url_quote, \
parse_options_header, Headers
import oauth2
# Cached ElementTree implementation; resolved lazily by get_etree().
_etree = None


def get_etree():
    """Return an elementtree implementation, preferring lxml.

    The first successful import out of ``lxml.etree``,
    ``xml.etree.cElementTree`` and ``xml.etree.ElementTree`` is cached in
    the module-level ``_etree`` so the lookup happens only once.
    """
    global _etree
    if _etree is None:
        for modname in ('lxml.etree',
                        'xml.etree.cElementTree',
                        'xml.etree.ElementTree'):
            try:
                _etree = __import__(modname, fromlist=['__name__'])
            except ImportError:
                continue
            break
        else:
            raise TypeError('lxml or etree not found')
    return _etree
def parse_response(resp, content, strict=False):
    """Parse an HTTP response body according to its Content-Type header.

    JSON and XML payloads are decoded into Python objects; anything else
    is treated as form-encoded unless `strict` is set, in which case
    unknown content types are returned verbatim.
    """
    mimetype, options = parse_options_header(resp['content-type'])
    if mimetype in ('application/json', 'text/javascript'):
        return json.loads(content)
    if mimetype in ('application/xml', 'text/xml'):
        # technically, text/xml is ascii based but because many
        # implementations get that wrong and utf-8 is a superset of
        # ascii anyways, there is not much harm in assuming utf-8 here
        charset = options.get('charset', 'utf-8')
        return get_etree().fromstring(content.decode(charset))
    if mimetype != 'application/x-www-form-urlencoded' and strict:
        return content
    charset = options.get('charset', 'utf-8')
    return url_decode(content, charset=charset).to_dict()
def add_query(url, args):
    """Append *args* to *url* as a query string.

    Chooses '&' when the URL already carries a query string and '?'
    otherwise; returns the URL untouched when there is nothing to add.
    """
    if not args:
        return url
    separator = '&' if '?' in url else '?'
    return url + separator + url_encode(args)
def encode_request_data(data, format):
    """Serialize *data* for the given request *format*.

    Returns a ``(body, content_type)`` tuple; the content type is ``None``
    when no format is requested and the data passes through untouched.
    Raises :exc:`TypeError` for an unknown format.
    """
    if format is None:
        return data, None
    encoders = {
        'json': (lambda d: json.dumps(d or {}),
                 'application/json'),
        'urlencoded': (lambda d: url_encode(d or {}),
                       'application/x-www-form-urlencoded'),
    }
    try:
        encode, mimetype = encoders[format]
    except KeyError:
        raise TypeError('Unknown format %r' % format)
    return encode(data), mimetype
class OAuthResponse(object):
    """Contains the response sent back from an OAuth protected remote
    application.
    """

    def __init__(self, resp, content):
        """Wrap a raw httplib2 ``(resp, content)`` pair.

        :param resp: the httplib2 response object / header mapping.
        :param content: the raw response body.
        """
        #: a :class:`~werkzeug.Headers` object with the response headers
        #: the application sent.
        self.headers = Headers(resp)
        #: the raw, unencoded content from the server
        self.raw_data = content
        #: the parsed content from the server
        self.data = parse_response(resp, content, strict=True)

    @property
    def status(self):
        """The status code of the response."""
        return self.headers.get('status', type=int)
class OAuthClient(oauth2.Client):
    """An oauth2 ``Client`` that knows how to request new request tokens."""

    def request_new_token(self, uri, callback=None, params=None):
        """POST a signed token request to *uri*.

        :param uri: the token endpoint URL.
        :param callback: optional OAuth callback URL, sent as the
                         ``oauth_callback`` parameter.
        :param params: optional extra request parameters.
        :return: the ``(response, content)`` tuple from httplib2.
        """
        # Copy into a fresh dict.  The previous version used a mutable
        # default argument ({}) and wrote oauth_callback straight into it,
        # so the callback leaked into every later call that omitted
        # `params` (and into caller-owned dicts that were passed in).
        params = dict(params or {})
        if callback is not None:
            params['oauth_callback'] = callback
        req = oauth2.Request.from_consumer_and_token(
            self.consumer, token=self.token,
            http_method='POST', http_url=uri, parameters=params,
            is_form_encoded=True)
        req.sign_request(self.method, self.consumer, self.token)
        body = req.to_postdata()
        headers = {
            'Content-Type': 'application/x-www-form-urlencoded',
            'Content-Length': str(len(body))
        }
        # Bypass oauth2.Client.request: the request is already signed.
        return httplib2.Http.request(self, uri, method='POST',
                                     body=body, headers=headers)
class OAuthException(RuntimeError):
    """Raised if authorization fails for some reason."""

    # Class-level fallbacks so the attributes exist on the class itself.
    message = None
    type = None

    def __init__(self, message, type=None, data=None):
        # A helpful error message for debugging.
        self.message = message
        # A unique type for this exception if available.
        self.type = type
        # If available, the parsed data from the remote API that can be
        # used to pinpoint the error.
        self.data = data

    def __str__(self):
        return self.message.encode('utf-8')

    def __unicode__(self):
        return self.message
class OAuth(object):
    """Registry for remote applications. In the future this will also
    be the central class for OAuth provider functionality.
    """

    def __init__(self):
        # Maps application name -> OAuthRemoteApp for every registered app.
        self.remote_apps = {}

    def remote_app(self, name, register=True, **kwargs):
        """Create (and usually register) a new remote application.

        The keyword arguments are forwarded to the :class:`OAuthRemoteApp`
        constructor.  With ``register=False`` the application is returned
        without being stored in the :attr:`remote_apps` dictionary.
        """
        remote = OAuthRemoteApp(self, name, **kwargs)
        if register:
            assert name not in self.remote_apps, \
                'application already registered'
            self.remote_apps[name] = remote
        return remote
class OAuthRemoteApp(object):
    """Represents a remote application.

    :param oauth: the associated :class:`OAuth` object.
    :param name: the name of the remote application
    :param base_url: the base URL all relative request URLs are joined with
    :param request_token_url: the URL for requesting new tokens
    :param access_token_url: the URL for token exchange
    :param authorize_url: the URL for authorization
    :param consumer_key: the application specific consumer key
    :param consumer_secret: the application specific consumer secret
    :param request_token_params: an optional dictionary of parameters
                                 to forward to the request token URL
                                 or authorize URL depending on oauth
                                 version.
    :param access_token_params: an optional dictionary of parameters to
                                forward to the access token URL
    :param access_token_method: the HTTP method that should be used
                                for the access_token_url.  Defaults
                                to ``'GET'``.
    """

    def __init__(self, oauth, name, base_url,
                 request_token_url,
                 access_token_url, authorize_url,
                 consumer_key, consumer_secret,
                 request_token_params=None,
                 access_token_params=None,
                 access_token_method='GET'):
        self.oauth = oauth
        #: the `base_url` all URLs are joined with.
        self.base_url = base_url
        self.name = name
        self.request_token_url = request_token_url
        self.access_token_url = access_token_url
        self.authorize_url = authorize_url
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret
        # Registered later through the :meth:`tokengetter` decorator.
        self.tokengetter_func = None
        self.request_token_params = request_token_params or {}
        self.access_token_params = access_token_params or {}
        self.access_token_method = access_token_method
        # One oauth2 consumer/client pair is shared by all requests.
        self._consumer = oauth2.Consumer(self.consumer_key,
                                         self.consumer_secret)
        self._client = OAuthClient(self._consumer)

    def status_okay(self, resp):
        """Given request data, checks if the status is okay."""
        try:
            return int(resp['status']) in (200, 201)
        except ValueError:
            return False

    def get(self, *args, **kwargs):
        """Sends a ``GET`` request.  Accepts the same parameters as
        :meth:`request`.
        """
        kwargs['method'] = 'GET'
        return self.request(*args, **kwargs)

    def post(self, *args, **kwargs):
        """Sends a ``POST`` request.  Accepts the same parameters as
        :meth:`request`.
        """
        kwargs['method'] = 'POST'
        return self.request(*args, **kwargs)

    def put(self, *args, **kwargs):
        """Sends a ``PUT`` request.  Accepts the same parameters as
        :meth:`request`.
        """
        kwargs['method'] = 'PUT'
        return self.request(*args, **kwargs)

    def delete(self, *args, **kwargs):
        """Sends a ``DELETE`` request.  Accepts the same parameters as
        :meth:`request`.
        """
        kwargs['method'] = 'DELETE'
        return self.request(*args, **kwargs)

    def make_client(self, token=None):
        """Creates a new `oauth2` Client object with the token attached.
        Usually you don't have to do that but use the :meth:`request`
        method instead.
        """
        return oauth2.Client(self._consumer, self.get_request_token(token))

    def request(self, url, data="", headers=None, format='urlencoded',
                method='GET', content_type=None, token=None):
        """Sends a request to the remote server with OAuth tokens attached.
        The `url` is joined with :attr:`base_url` if the URL is relative.

        .. versionadded:: 0.12
           added the `token` parameter.

        :param url: where to send the request to
        :param data: the data to be sent to the server.  If the request method
                     is ``GET`` the data is appended to the URL as query
                     parameters, otherwise encoded to `format` if the format
                     is given.  If a `content_type` is provided instead, the
                     data must be a string encoded for the given content
                     type and used as request body.
        :param headers: an optional dictionary of headers.
        :param format: the format for the `data`.  Can be `urlencoded` for
                       URL encoded data or `json` for JSON.
        :param method: the HTTP request method to use.
        :param content_type: an optional content type.  If a content type is
                             provided, the data is passed as it and the
                             `format` parameter is ignored.
        :param token: an optional token to pass to tokengetter. Use this if you
                      want to support sending requests using multiple tokens.
                      If you set this to anything not None, `tokengetter_func`
                      will receive the given token as an argument, in which case
                      the tokengetter should return the `(token, secret)` tuple
                      for the given token.
        :return: an :class:`OAuthResponse` object.
        """
        headers = dict(headers or {})
        client = self.make_client(token)
        url = self.expand_url(url)
        if method == 'GET':
            # GET carries data in the query string only.
            assert format == 'urlencoded'
            if data:
                url = add_query(url, data)
            data = ""
        else:
            if content_type is None:
                data, content_type = encode_request_data(data, format)
            if content_type is not None:
                headers['Content-Type'] = content_type
        return OAuthResponse(*client.request(url, method=method,
                                             body=data or '',
                                             headers=headers))

    def expand_url(self, url):
        """Join a possibly relative *url* with :attr:`base_url`."""
        return urljoin(self.base_url, url)

    def generate_request_token(self, callback=None):
        """Fetch a fresh OAuth1 request token and stash it in the session.

        :return: the ``(oauth_token, oauth_token_secret)`` tuple.
        """
        if callback is not None:
            callback = urljoin(request.url, callback)
        resp, content = self._client.request_new_token(
            self.expand_url(self.request_token_url), callback,
            self.request_token_params)
        if not self.status_okay(resp):
            raise OAuthException('Failed to generate request token',
                                 type='token_generation_failed')
        data = parse_response(resp, content)
        if data is None:
            raise OAuthException('Invalid token response from ' + self.name,
                                 type='token_generation_failed')
        tup = (data['oauth_token'], data['oauth_token_secret'])
        # Kept in the session so the authorized handler can sign the
        # access-token exchange later in the dance.
        session[self.name + '_oauthtok'] = tup
        return tup

    def get_request_token(self, token=None):
        """Return an ``oauth2.Token``, preferring the registered tokengetter
        and falling back to the session-stored request token.
        """
        assert self.tokengetter_func is not None, 'missing tokengetter function'
        # Don't pass the token if the token is None to support old
        # tokengetter functions.
        rv = self.tokengetter_func(*(token and (token,) or ()))
        if rv is None:
            rv = session.get(self.name + '_oauthtok')
            if rv is None:
                raise OAuthException('No token available', type='token_missing')
        return oauth2.Token(*rv)

    def free_request_token(self):
        """Drop the temporary token/redirect data from the session."""
        session.pop(self.name + '_oauthtok', None)
        session.pop(self.name + '_oauthredir', None)

    def authorize(self, callback=None):
        """Returns a redirect response to the remote authorization URL with
        the signed callback given.  The callback must be `None` in which
        case the application will most likely switch to PIN based authentication
        or use a remotely stored callback URL.  Alternatively it's an URL
        on the system that has to be decorated as :meth:`authorized_handler`.
        """
        if self.request_token_url:
            # OAuth1: obtain a request token first, then redirect with it.
            token = self.generate_request_token(callback)[0]
            url = '%s?oauth_token=%s' % (self.expand_url(self.authorize_url),
                                         url_quote(token))
        else:
            assert callback is not None, 'Callback is required OAuth2'
            # This is for things like facebook's oauth.  Since we need the
            # callback for the access_token_url we need to keep it in the
            # session.
            params = dict(self.request_token_params)
            params['redirect_uri'] = callback
            params['client_id'] = self.consumer_key
            params['response_type'] = 'code'
            session[self.name + '_oauthredir'] = callback
            url = add_query(self.expand_url(self.authorize_url), params)
        return redirect(url)

    def tokengetter(self, f):
        """Registers a function as tokengetter.  The tokengetter has to return
        a tuple of ``(token, secret)`` with the user's token and token secret.
        If the data is unavailable, the function must return `None`.

        If the `token` parameter is passed to the request function it's
        forwarded to the tokengetter function::

            @oauth.tokengetter
            def get_token(token='user'):
                if token == 'user':
                    return find_the_user_token()
                elif token == 'app':
                    return find_the_app_token()
                raise RuntimeError('invalid token')
        """
        self.tokengetter_func = f
        return f

    def handle_oauth1_response(self):
        """Handles an oauth1 authorization response.  The return value of
        this method is forwarded as first argument to the handling view
        function.
        """
        client = self.make_client()
        resp, content = client.request('%s?oauth_verifier=%s' % (
            self.expand_url(self.access_token_url),
            request.args['oauth_verifier']
        ), self.access_token_method)
        data = parse_response(resp, content)
        if not self.status_okay(resp):
            raise OAuthException('Invalid response from ' + self.name,
                                 type='invalid_response', data=data)
        return data

    def handle_oauth2_response(self):
        """Handles an oauth2 authorization response.  The return value of
        this method is forwarded as first argument to the handling view
        function.
        """
        remote_args = {
            'code': request.args.get('code'),
            'client_id': self.consumer_key,
            'client_secret': self.consumer_secret,
            # The redirect_uri stored by authorize(); must match for the
            # provider to accept the code exchange.
            'redirect_uri': session.get(self.name + '_oauthredir')
        }
        remote_args.update(self.access_token_params)
        if self.access_token_method == 'POST':
            resp, content = self._client.request(self.expand_url(self.access_token_url),
                                                 self.access_token_method,
                                                 url_encode(remote_args))
        elif self.access_token_method == 'GET':
            url = add_query(self.expand_url(self.access_token_url), remote_args)
            resp, content = self._client.request(url, self.access_token_method)
        else:
            raise OAuthException('Unsupported access_token_method: ' +
                                 self.access_token_method)
        data = parse_response(resp, content)
        if not self.status_okay(resp):
            raise OAuthException('Invalid response from ' + self.name,
                                 type='invalid_response', data=data)
        return data

    def handle_unknown_response(self):
        """Called if an unknown response came back from the server.  This
        usually indicates a denied response.  The default implementation
        just returns `None`.
        """
        return None

    def authorized_handler(self, f):
        """Injects additional authorization functionality into the function.
        The function will be passed the response object as first argument
        if the request was allowed, or `None` if access was denied.  When the
        authorized handler is called, the temporary issued tokens are already
        destroyed.
        """
        @wraps(f)
        def decorated(*args, **kwargs):
            if 'oauth_verifier' in request.args:
                data = self.handle_oauth1_response()
            elif 'code' in request.args:
                data = self.handle_oauth2_response()
            else:
                data = self.handle_unknown_response()
            # The one-shot request token has served its purpose.
            self.free_request_token()
            return f(*((data,) + args), **kwargs)
        return decorated
| bsd-3-clause |
SaganBolliger/nupic | examples/opf/experiments/missing_record/simple_0/description.py | 32 | 1722 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.expdescriptionhelpers import importBaseDescription
# The sub-experiment configuration: overrides applied on top of the
# shared base experiment description.
config = \
{
    # CSV input resolved relative to this description file.
    'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
                                           '../datasets/simple_0.csv'),
    'windowSize': 25,
    'modelParams': {
        'sensorParams': {
            'verbosity': 0,
            # Setting an encoder to None disables it from the base config.
            'encoders': {
                'timestamp_timeOfDay': None,
                'timestamp_dayOfWeek': None,
                'field2': None,
            }
        },
        'clParams': {
            'clVerbosity': 0,
        }
    }
}

# Merge the base description with this sub-experiment's config and expose
# the resulting symbols at module level (expected by the OPF runner).
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| agpl-3.0 |
vipul-sharma20/oh-mainline | mysite/profile/migrations/0064_person_location_flags.py | 17 | 17461 | # This file is part of OpenHatch.
# Copyright (C) 2010 Parker Phinney
# Copyright (C) 2010 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from south.db import db
from django.db import models
from mysite.profile.models import *
class Migration:
    def forwards(self, orm):
        """Apply: add the Person location flags and refresh the frozen
        date_created defaults (South regenerates the datetime literal on
        every --auto run, hence the alter_column churn)."""
        # Adding field 'Person.dont_guess_my_location'
        db.add_column('profile_person', 'dont_guess_my_location', orm['profile.person:dont_guess_my_location'])

        # Adding field 'Person.location_confirmed'
        db.add_column('profile_person', 'location_confirmed', orm['profile.person:location_confirmed'])

        # Changing field 'DataImportAttempt.date_created'
        # (to signature: django.db.models.fields.DateTimeField(default=datetime.datetime(2010, 1, 8, 2, 5, 52, 874470)))
        db.alter_column('profile_dataimportattempt', 'date_created', orm['profile.dataimportattempt:date_created'])

        # Changing field 'PortfolioEntry.date_created'
        # (to signature: django.db.models.fields.DateTimeField(default=datetime.datetime(2010, 1, 8, 2, 5, 53, 626238)))
        db.alter_column('profile_portfolioentry', 'date_created', orm['profile.portfolioentry:date_created'])

        # Changing field 'Citation.date_created'
        # (to signature: django.db.models.fields.DateTimeField(default=datetime.datetime(2010, 1, 8, 2, 5, 53, 701703)))
        db.alter_column('profile_citation', 'date_created', orm['profile.citation:date_created'])
    def backwards(self, orm):
        """Revert: drop the Person location flags and restore the previous
        frozen date_created defaults."""
        # Deleting field 'Person.dont_guess_my_location'
        db.delete_column('profile_person', 'dont_guess_my_location')

        # Deleting field 'Person.location_confirmed'
        db.delete_column('profile_person', 'location_confirmed')

        # Changing field 'DataImportAttempt.date_created'
        # (to signature: django.db.models.fields.DateTimeField(default=datetime.datetime(2010, 1, 6, 19, 33, 4, 150522)))
        db.alter_column('profile_dataimportattempt', 'date_created', orm['profile.dataimportattempt:date_created'])

        # Changing field 'PortfolioEntry.date_created'
        # (to signature: django.db.models.fields.DateTimeField(default=datetime.datetime(2010, 1, 6, 19, 33, 3, 755043)))
        db.alter_column('profile_portfolioentry', 'date_created', orm['profile.portfolioentry:date_created'])

        # Changing field 'Citation.date_created'
        # (to signature: django.db.models.fields.DateTimeField(default=datetime.datetime(2010, 1, 6, 19, 33, 4, 508411)))
        db.alter_column('profile_citation', 'date_created', orm['profile.citation:date_created'])
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'customs.webresponse': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'response_headers': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.IntegerField', [], {}),
'text': ('django.db.models.fields.TextField', [], {}),
'url': ('django.db.models.fields.TextField', [], {})
},
'profile.citation': {
'contributor_role': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'data_import_attempt': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.DataImportAttempt']", 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2010, 1, 8, 2, 5, 54, 480532)'}),
'distinct_months': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'first_commit_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_due_to_duplicate': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'languages': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'old_summary': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True'}),
'portfolio_entry': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.PortfolioEntry']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'profile.dataimportattempt': {
'completed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2010, 1, 8, 2, 5, 55, 84043)'}),
'failed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Person']"}),
'query': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'web_response': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['customs.WebResponse']", 'null': 'True'})
},
'profile.link_person_tag': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Person']"}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Tag']"})
},
'profile.link_project_tag': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Tag']"})
},
'profile.link_projectexp_tag': {
'Meta': {'unique_together': "[('tag', 'project_exp', 'source')]"},
'favorite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project_exp': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.ProjectExp']"}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Tag']"})
},
'profile.link_sf_proj_dude_fm': {
'Meta': {'unique_together': "[('person', 'project')]"},
'date_collected': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.SourceForgePerson']"}),
'position': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.SourceForgeProject']"})
},
'profile.person': {
'blacklisted_repository_committers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profile.RepositoryCommitter']"}),
'dont_guess_my_location': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'gotten_name_from_ohloh': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interested_in_working_on': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}),
'last_polled': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1970, 1, 1, 0, 0)'}),
'location_confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'location_display_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '100'}),
'photo_thumbnail': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '100', 'null': 'True'}),
'show_email': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'profile.portfolioentry': {
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2010, 1, 8, 2, 5, 54, 835206)'}),
'experience_description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Person']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
'project_description': ('django.db.models.fields.TextField', [], {})
},
'profile.projectexp': {
'data_import_attempt': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.DataImportAttempt']", 'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'man_months': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'modified': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Person']", 'null': 'True'}),
'person_role': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'primary_language': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
'should_show_this': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'profile.repositorycommitter': {
'Meta': {'unique_together': "(('project', 'data_import_attempt'),)"},
'data_import_attempt': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.DataImportAttempt']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"})
},
'profile.sourceforgeperson': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'profile.sourceforgeproject': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'unixname': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'profile.tag': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.TagType']"}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'profile.tagtype': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'search.project': {
'date_icon_was_fetched_from_ohloh': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'icon_for_profile': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_for_search_result': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_raw': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_smaller_for_badge': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
}
}
complete_apps = ['profile']
| agpl-3.0 |
jsilter/scipy | scipy/integrate/__init__.py | 46 | 1899 | """
=============================================
Integration and ODEs (:mod:`scipy.integrate`)
=============================================
.. currentmodule:: scipy.integrate
Integrating functions, given function object
============================================
.. autosummary::
:toctree: generated/
quad -- General purpose integration
dblquad -- General purpose double integration
tplquad -- General purpose triple integration
nquad -- General purpose n-dimensional integration
fixed_quad -- Integrate func(x) using Gaussian quadrature of order n
quadrature -- Integrate with given tolerance using Gaussian quadrature
romberg -- Integrate func using Romberg integration
Integrating functions, given fixed samples
==========================================
.. autosummary::
:toctree: generated/
cumtrapz -- Use trapezoidal rule to cumulatively compute integral.
simps -- Use Simpson's rule to compute integral from samples.
romb -- Use Romberg Integration to compute integral from
-- (2**k + 1) evenly-spaced samples.
.. seealso::
:mod:`scipy.special` for orthogonal polynomials (special) for Gaussian
quadrature roots and weights for other weighting factors and regions.
Integrators of ODE systems
==========================
.. autosummary::
:toctree: generated/
odeint -- General integration of ordinary differential equations.
ode -- Integrate ODE using VODE and ZVODE routines.
complex_ode -- Convert a complex-valued ODE to real-valued and integrate.
"""
from __future__ import division, print_function, absolute_import
from .quadrature import *
from .odepack import *
from .quadpack import *
from ._ode import *
__all__ = [s for s in dir() if not s.startswith('_')]
from numpy.testing import Tester
test = Tester().test
| bsd-3-clause |
frankyrumple/ope | admin_app/gluon/contrib/markmin/markmin2html.py | 8 | 55792 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# created by Massimo Di Pierro
# recreated by Vladyslav Kozlovskyy
# license MIT/BSD/GPL
import re
import urllib
from cgi import escape
from string import maketrans
try:
from ast import parse as ast_parse
import ast
except ImportError: # python 2.5
from compiler import parse
import compiler.ast as ast
"""
TODO: next version should use MathJax
<script type="text/javascript" src="http://cdn.mathjax.org/mathjax/latest/MathJax.js">
MathJax.Hub.Config({
extensions: ["tex2jax.js","TeX/AMSmath.js","TeX/AMSsymbols.js"],
jax: ["input/TeX", "output/HTML-CSS"],
tex2jax: {
inlineMath: [ ['$','$'], ["\\(","\\)"] ],
displayMath: [ ['$$','$$'], ["\\[","\\]"] ],
},
"HTML-CSS": { availableFonts: ["TeX"] }
});
</script>
"""
__all__ = ['render', 'markmin2html', 'markmin_escape']
__doc__ = """
# Markmin markup language
## About
This is a new markup language that we call markmin designed to produce high quality scientific papers and books and also put them online. We provide serializers for html, latex and pdf. It is implemented in the ``markmin2html`` function in the ``markmin2html.py``.
Example of usage:
``
m = "Hello **world** [[link http://web2py.com]]"
from markmin2html import markmin2html
print markmin2html(m)
from markmin2latex import markmin2latex
print markmin2latex(m)
from markmin2pdf import markmin2pdf # requires pdflatex
print markmin2pdf(m)
``
====================
# This is a test block
with new features:
This is a blockquote with
a list with tables in it:
-----------
This is a paragraph before list.
You can continue paragraph on the
next lines.
This is an ordered list with tables:
+ Item 1
+ Item 2
+ --------
aa|bb|cc
11|22|33
--------:tableclass1[tableid1]
+ Item 4
-----------
T1| T2| t3
===========
aaa|bbb|ccc
ddd|fff|ggg
123|0 |5.0
-----------:tableclass1
-----------:blockquoteclass[blockquoteid]
This this a new paragraph
with a followed table.
Table has header, footer, sections,
odd and even rows:
-------------------------------
**Title 1**|**Title 2**|**Title 3**
==============================
data 1 | data 2 | 2.00
data 3 |data4(long)| 23.00
|data 5 | 33.50
==============================
New section|New data | 5.00
data 1 |data2(long)|100.45
|data 3 | 12.50
data 4 | data 5 | .33
data 6 |data7(long)| 8.01
|data 8 | 514
==============================
Total: | 9 items |698,79
------------------------------:tableclass1[tableid2]
## Multilevel
lists
Now lists can be multilevel:
+ Ordered item 1 on level 1.
You can continue item text on
next strings
. paragraph in an item
++. Ordered item 1 of sublevel 2 with
a paragraph (paragraph can start
with point after plus or minus
characters, e.g. **++.** or **--.**)
++. This is another item. But with 3 paragraphs,
blockquote and sublists:
.. This is the second paragraph in the item. You
can add paragraphs to an item, using point
notation, where first characters in the string
are sequence of points with space between
them and another string. For example, this
paragraph (in sublevel 2) starts with two points:
``.. This is the second paragraph...``
.. ----------
### this is a blockquote in a list
You can use blockquote with headers, paragraphs,
tables and lists in it:
Tables can have or have not header and footer.
This table is defined without any header
and footer in it:
---------------------
red |fox | 0
blue |dolphin | 1000
green|leaf | 10000
---------------------
----------
.. This is yet another paragraph in the item.
--- This is an item of unordered list **(sublevel 3)**
--- This is the second item of the unordered list ''(sublevel 3)''
++++++ This is a single item of ordered list in sublevel 6
.... and this is a paragraph in sublevel 4
---. This is a new item with paragraph in sublevel 3.
++++ Start ordered list in sublevel 4 with code block: ``
line 1
line 2
line 3
``
++++. Yet another item with code block (we need to indent \`\` to add code block as part of item):
``
line 1
line 2
line 3
``
This item finishes with this paragraph.
... Item in sublevel 3 can be continued with paragraphs.
... ``
this is another
code block
in the
sublevel 3 item
``
+++ The last item in sublevel 3
.. This is a continuous paragraph for item 2 in sublevel 2.
You can use such structure to create difficult structured
documents.
++ item 3 in sublevel 2
-- item 1 in sublevel 2 (new unordered list)
-- item 2 in sublevel 2
-- item 3 in sublevel 2
++ item 1 in sublevel 2 (new ordered list)
++ item 2 in sublevel 2
++ item 3 in sublevle 2
+ item 2 in level 1
+ item 3 in level 1
- new unordered list (item 1 in level 1)
- level 2 in level 1
- level 3 in level 1
- level 4 in level 1
## This is the last section of the test
Single paragraph with '----' in it will be turned into separator:
-----------
And this is the last paragraph in
the test. Be happy!
====================
## Why?
We wanted a markup language with the following requirements:
- less than 300 lines of functional code
- easy to read
- secure
- support table, ul, ol, code
- support html5 video and audio elements (html serialization only)
- can align images and resize them
- can specify class for tables, blockquotes and code elements
- can add anchors
- does not use _ for markup (since it creates odd behavior)
- automatically links urls
- fast
- easy to extend
- supports latex and pdf including references
- allows to describe the markup in the markup (this document is generated from markmin syntax)
(results depend on text but in average for text ~100K markmin is 30% faster than markdown, for text ~10K it is 10x faster)
The [[web2py book http://www.lulu.com/product/paperback/web2py-%283rd-edition%29/12822827]] published by lulu, for example, was entirely generated with markmin2pdf from the online [[web2py wiki http://www.web2py.com/book]]
## Download
- http://web2py.googlecode.com/hg/gluon/contrib/markmin/markmin2html.py
- http://web2py.googlecode.com/hg/gluon/contrib/markmin/markmin2latex.py
- http://web2py.googlecode.com/hg/gluon/contrib/markmin/markmin2pdf.py
markmin2html.py and markmin2latex.py are single files and have no web2py dependence. Their license is BSD.
## Examples
### Bold, italic, code and links
------------------------------------------------------------------------------
**SOURCE** | **OUTPUT**
==============================================================================
``# title`` | **title**
``## section`` | **section**
``### subsection`` | **subsection**
``**bold**`` | **bold**
``''italic''`` | ''italic''
``~~strikeout~~`` | ~~strikeout~~
``!`!`verbatim`!`!`` | ``verbatim``
``\`\`color with **bold**\`\`:red`` | ``color with **bold**``:red
``\`\`many colors\`\`:color[blue:#ffff00]`` | ``many colors``:color[blue:#ffff00]
``http://google.com`` | http://google.com
``[[**click** me #myanchor]]`` | [[**click** me #myanchor]]
``[[click me [extra info] #myanchor popup]]`` | [[click me [extra info] #myanchor popup]]
-------------------------------------------------------------------------------
### More on links
The format is always ``[[title link]]`` or ``[[title [extra] link]]``. Notice you can nest bold, italic, strikeout and code inside the link ``title``.
### Anchors [[myanchor]]
You can place an anchor anywhere in the text using the syntax ``[[name]]`` where ''name'' is the name of the anchor.
You can then link the anchor with [[link #myanchor]], i.e. ``[[link #myanchor]]`` or [[link with an extra info [extra info] #myanchor]], i.e.
``[[link with an extra info [extra info] #myanchor]]``.
### Images
[[alt-string for the image [the image title] http://www.web2py.com/examples/static/web2py_logo.png right 200px]]
This paragraph has an image aligned to the right with a width of 200px. Its is placed using the code
``[[alt-string for the image [the image title] http://www.web2py.com/examples/static/web2py_logo.png right 200px]]``.
### Unordered Lists
``
- Dog
- Cat
- Mouse
``
is rendered as
- Dog
- Cat
- Mouse
Two new lines between items break the list in two lists.
### Ordered Lists
``
+ Dog
+ Cat
+ Mouse
``
is rendered as
+ Dog
+ Cat
+ Mouse
### Multilevel Lists
``
+ Dogs
-- red
-- brown
-- black
+ Cats
-- fluffy
-- smooth
-- bald
+ Mice
-- small
-- big
-- huge
``
is rendered as
+ Dogs
-- red
-- brown
-- black
+ Cats
-- fluffy
-- smooth
-- bald
+ Mice
-- small
-- big
-- huge
### Tables (with optional header and/or footer)
Something like this
``
-----------------
**A**|**B**|**C**
=================
0 | 0 | X
0 | X | 0
X | 0 | 0
=================
**D**|**F**|**G**
-----------------:abc[id]
``
is a table and is rendered as
-----------------
**A**|**B**|**C**
=================
0 | 0 | X
0 | X | 0
X | 0 | 0
=================
**D**|**F**|**G**
-----------------:abc[id]
Four or more dashes delimit the table and | separates the columns.
The ``:abc``, ``:id[abc_1]`` or ``:abc[abc_1]`` at the end sets the class and/or id for the table and it is optional.
### Blockquote
A table with a single cell is rendered as a blockquote:
-----
Hello world
-----
Blockquote can contain headers, paragraphs, lists and tables:
``
-----
This is a paragraph in a blockquote
+ item 1
+ item 2
-- item 2.1
-- item 2.2
+ item 3
---------
0 | 0 | X
0 | X | 0
X | 0 | 0
---------:tableclass1
-----
``
is rendered as:
-----
This is a paragraph in a blockquote
+ item 1
+ item 2
-- item 2.1
-- item 2.2
+ item 3
---------
0 | 0 | X
0 | X | 0
X | 0 | 0
---------:tableclass1
-----
### Code, ``<code>``, escaping and extra stuff
``
def test():
return "this is Python code"
``:python
Optionally a ` inside a ``!`!`...`!`!`` block can be inserted escaped with !`!.
**NOTE:** You can escape markmin constructions (\\'\\',\`\`,\*\*,\~\~,\[,\{,\]\},\$,\@) with '\\\\' character:
so \\\\`\\\\` can replace !`!`! escape string
The ``:python`` after the markup is also optional. If present, by default, it is used to set the class of the <code> block.
The behavior can be overridden by passing an argument ``extra`` to the ``render`` function. For example:
``
markmin2html("!`!!`!aaa!`!!`!:custom",
extra=dict(custom=lambda text: 'x'+text+'x'))
``:python
generates
``'xaaax'``:python
(the ``!`!`...`!`!:custom`` block is rendered by the ``custom=lambda`` function passed to ``render``).
### Line breaks
``[[NEWLINE]]`` tag is used to break lines:
``
#### Multiline [[NEWLINE]]
title
paragraph [[NEWLINE]]
with breaks[[NEWLINE]]in it
``
generates:
#### Multiline [[NEWLINE]]
title
paragraph [[NEWLINE]]
with breaks[[NEWLINE]]in it
### Html5 support
Markmin also supports the <video> and <audio> html5 tags using the notation:
``
[[message link video]]
[[message link audio]]
[[message [title] link video]]
[[message [title] link audio]]
``
where ``message`` will be shown in browsers without HTML5 video/audio tags support.
### Latex and other extensions
Formulas can be embedded into HTML with ''\$\$``formula``\$\$''.
You can use Google charts to render the formula:
``
LATEX = '<img src="http://chart.apis.google.com/chart?cht=tx&chl=%s" />'
markmin2html(text,{'latex':lambda code: LATEX % urllib.quote(code)})
``
### Code with syntax highlighting
This requires a syntax highlighting tool, such as the web2py CODE helper.
``
extra={'code_cpp':lambda text: CODE(text,language='cpp').xml(),
'code_java':lambda text: CODE(text,language='java').xml(),
'code_python':lambda text: CODE(text,language='python').xml(),
'code_html':lambda text: CODE(text,language='html').xml()}
``
or simple:
``
extra={'code':lambda text,lang='python': CODE(text,language=lang).xml()}
``
``
markmin2html(text,extra=extra)
``
Code can now be marked up as in this example:
``
!`!`
<html><body>example</body></html>
!`!`:code_html
``
OR
``
!`!`
<html><body>example</body></html>
!`!`:code[html]
``
### Citations and References
Citations are treated as internal links in html and proper citations in latex if there is a final section called "References". Items like
``
- [[key]] value
``
in the References will be translated into Latex
``
\\bibitem{key} value
``
Here is an example of usage:
``
As shown in Ref.!`!`mdipierro`!`!:cite
## References
- [[mdipierro]] web2py Manual, 3rd Edition, lulu.com
``
### Caveats
``<ul/>``, ``<ol/>``, ``<code/>``, ``<table/>``, ``<blockquote/>``, ``<h1/>``, ..., ``<h6/>`` do not have ``<p>...</p>`` around them.
"""
# Color names accepted directly by the ``text``:color markup (anything else
# must be given as #rrggbb).
html_colors = ['aqua', 'black', 'blue', 'fuchsia', 'gray', 'green',
               'lime', 'maroon', 'navy', 'olive', 'purple', 'red',
               'silver', 'teal', 'white', 'yellow']

# Control bytes used as in-text placeholders while the parser runs; they are
# assumed never to appear in user input.
META = '\x06'           # placeholder for an already-extracted ``code`` fragment
LINK = '\x07'           # placeholder for an already-extracted [[link]] fragment
DISABLED_META = '\x08'  # placeholder for an escaped code fragment (must not be re-parsed)

# Google Charts endpoint used to render $$latex$$ formulas as images.
LATEX = '<img src="http://chart.apis.google.com/chart?cht=tx&chl=%s" />'

# @/app/controller/function/args URL shorthand (expanded by replace_at_urls).
regex_URL = re.compile(r'@/(?P<a>\w*)/(?P<c>\w*)/(?P<f>\w*(\.\w+)?)(/(?P<args>[\w\.\-/]+))?')
# @{name} or @{name:args} component placeholder (expanded by replace_components).
regex_env2 = re.compile(r'@\{(?P<a>[\w\-\.]+?)(\:(?P<b>.*?))?\}')
# Finds META/DISABLED_META placeholders (or a literal ````) for re-expansion.
regex_expand_meta = re.compile('(' + META + '|' + DISABLED_META + '|````)')
# $$...$$ latex formula spans.
regex_dd = re.compile(r'\$\$(?P<latex>.*?)\$\$')
# ``code`` spans with optional :class and [id] suffixes; also matches the
# placeholder bytes so already-extracted fragments are skipped.
regex_code = re.compile(
    '(' + META + '|' + DISABLED_META + r'|````)|(``(?P<t>.+?)``(?::(?P<c>[a-zA-Z][_a-zA-Z\-\d]*)(?:\[(?P<p>[^\]]*)\])?)?)',
    re.S)
# **bold** spans (no leading/trailing whitespace inside the markers).
regex_strong = re.compile(r'\*\*(?P<t>[^\s*]+( +[^\s*]+)*)\*\*')
# ~~strikeout~~ spans.
regex_del = re.compile(r'~~(?P<t>[^\s*]+( +[^\s*]+)*)~~')
# ''italic'' spans.
regex_em = re.compile(r"''(?P<t>([^\s']| |'(?!'))+)''")
# A bare (optionally signed, optionally exponent-bearing) number.
regex_num = re.compile(r"^\s*[+-]?((\d+(\.\d*)?)|\.\d+)([eE][+-]?[0-9]+)?\s*$")
# Line classifier: leading #... header marks or .../+++/--- list/paragraph marks.
regex_list = re.compile('^(?:(?:(#{1,6})|(?:(\.+|\++|\-+)(\.)?))\s*)?(.*)$')
# Blockquote/table delimiter line (---- and longer), optionally inside a list item.
regex_bq_headline = re.compile('^(?:(\.+|\++|\-+)(\.)?\s+)?(-{3}-*)$')
# Closing table delimiter with optional :class[id] suffix.
regex_tq = re.compile('^(-{3}-*)(?::(?P<c>[a-zA-Z][_a-zA-Z\-\d]*)(?:\[(?P<p>[a-zA-Z][_a-zA-Z\-\d]*)\])?)?$')
# "proto:scheme://..." links (e.g. iframe:, qr:) handled by protolinks.
regex_proto = re.compile(r'(?<!["\w>/=])(?P<p>\w+):(?P<k>\w+://[\w\d\-+=?%&/:.]+)', re.M)
# Bare URLs and e-mail addresses auto-converted by the autolinks callback.
regex_auto = re.compile(r'(?<!["\w>/=])(?P<k>\w+://[\w\d\-+_=?%&/:.,;#]+\w|[\w\-.]+@[\w\-.]+)', re.M)
# [[...]] link markup (also matches the LINK placeholder byte).
regex_link = re.compile(r'(' + LINK + r')|\[\[(?P<s>.+?)\]\]', re.S)
# Interior of [[...]]: title, optional [extra], optional url, optional "popup".
regex_link_level2 = re.compile(r'^(?P<t>\S.*?)?(?:\s+\[(?P<a>.+?)\])?(?:\s+(?P<k>\S+))?(?:\s+(?P<p>popup))?\s*$', re.S)
# Interior of [[...]] for media: adds a placement keyword and optional width.
regex_media_level2 = re.compile(
    r'^(?P<t>\S.*?)?(?:\s+\[(?P<a>.+?)\])?(?:\s+(?P<k>\S+))?\s+(?P<p>img|IMG|left|right|center|video|audio|blockleft|blockright)(?:\s+(?P<w>\d+px))?\s*$',
    re.S)
# Markmin control characters to be backslash-escaped by markmin_escape.
regex_markmin_escape = re.compile(r"(\\*)(['`:*~\\[\]{}@\$+\-.#\n])")
# A single backslash-escaped control character (for unescaping).
regex_backslash = re.compile(r"\\(['`:*~\\[\]{}@\$+\-.#\n])")
# Translation tables swapping markmin control characters with placeholder
# bytes and back (Python 2 string.maketrans).
ttab_in = maketrans("'`:*~\\[]{}@$+-.#\n", '\x0b\x0c\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x05')
ttab_out = maketrans('\x0b\x0c\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x05', "'`:*~\\[]{}@$+-.#\n")
# Matches "name =" pairs so keyword-style arguments can be rewritten into
# python dict-literal syntax by make_dict().
regex_quote = re.compile('(?P<name>\w+?)\s*\=\s*')


def make_dict(b):
    """Turn "a=1, b=2" style text into the dict-literal string "{'a':1, 'b':2}"."""
    quoted = regex_quote.sub("'\g<name>':", b)
    return '{' + quoted + '}'
def safe_eval(node_or_string, env):
    """
    Safely evaluate an expression node or a string containing a Python
    expression. The string or node provided may only consist of the following
    Python literal structures: strings, numbers, tuples, lists, dicts, booleans,
    and None. Additionally, names present in `env` (plus None/True/False)
    resolve to their mapped values; anything else raises ValueError.
    """
    _safe_names = {'None': None, 'True': True, 'False': False}
    _safe_names.update(env)
    if isinstance(node_or_string, basestring):
        node_or_string = ast_parse(node_or_string, mode='eval')
    if isinstance(node_or_string, ast.Expression):
        node_or_string = node_or_string.body

    def _convert(node):
        # Recursively turn a literal AST node into the value it denotes.
        if isinstance(node, ast.Str):
            return node.s
        elif isinstance(node, ast.Num):
            return node.n
        elif isinstance(node, ast.Tuple):
            return tuple(map(_convert, node.elts))
        elif isinstance(node, ast.List):
            return list(map(_convert, node.elts))
        elif isinstance(node, ast.Dict):
            return dict((_convert(k), _convert(v)) for k, v
                        in zip(node.keys, node.values))
        elif isinstance(node, ast.Name):
            if node.id in _safe_names:
                return _safe_names[node.id]
        # Complex literals such as 1+2j parse as BinOp(Num, Add/Sub, Num);
        # accept exactly that shape, as ast.literal_eval does.
        # BUGFIX: the node classes must be qualified (ast.Add, ast.Sub,
        # ast.Num) -- the bare names were never imported here and raised
        # NameError whenever this branch was reached.
        elif isinstance(node, ast.BinOp) and \
                isinstance(node.op, (ast.Add, ast.Sub)) and \
                isinstance(node.right, ast.Num) and \
                isinstance(node.right.n, complex) and \
                isinstance(node.left, ast.Num) and \
                isinstance(node.left.n, (int, long, float)):
            left = node.left.n
            right = node.right.n
            if isinstance(node.op, ast.Add):
                return left + right
            else:
                return left - right
        raise ValueError('malformed string')
    return _convert(node_or_string)
def markmin_escape(text):
    """ insert \\ before markmin control characters: '`:*~[]{}@$ """
    def backslashed(match):
        # Double any existing backslashes, then prefix the whole match.
        return '\\' + match.group(0).replace('\\', '\\\\')
    return regex_markmin_escape.sub(backslashed, text)
def replace_autolinks(text, autolinks):
    """Rewrite every bare URL / e-mail matched by regex_auto through the
    autolinks(url) callback and return the resulting text."""
    def expand(match):
        return autolinks(match.group('k'))
    return regex_auto.sub(expand, text)
def replace_at_urls(text, url):
    """Expand experimental @/app/controller/function/args references by
    calling the supplied url() builder with scheme and host enabled."""
    def build(match, url=url):
        app = match.group('a') or None
        controller = match.group('c') or None
        function = match.group('f') or None
        args = (match.group('args') or '').split('/')
        return url(a=app, c=controller, f=function,
                   args=args, scheme=True, host=True)
    return regex_URL.sub(build, text)
def replace_components(text, env):
    # Expand @{name} / @{name:args} placeholders with values (or results of
    # callables) looked up in `env`.
    # not perfect but acceptable
    def u2(match, env=env):
        # Unknown names fall back to the raw matched text, leaving the
        # placeholder visible in the output.
        f = env.get(match.group('a'), match.group(0))
        if callable(f):
            b = match.group('b')
            try:
                # Try to interpret ":args" as keyword arguments ("a=1, b=2");
                # on failure, keep b as the raw string and pass it positionally.
                b = safe_eval(make_dict(b), env)
            except:
                pass
            try:
                f = f(**b) if isinstance(b, dict) else f(b)
            except Exception, e:
                # Surface component errors inline instead of breaking the page.
                f = 'ERROR: %s' % e
        return str(f)
    text = regex_env2.sub(u2, text)
    return text
def autolinks_simple(url):
    """
    it automatically converts the url to link,
    image, video or audio tag
    """
    lower = url.lower()
    # A bare address (no scheme) becomes a mailto link.
    if '@' in url and '://' not in url:
        return '<a href="mailto:%s">%s</a>' % (url, url)
    # Suffix-driven media dispatch.
    # NOTE(review): 'controls' is not a valid <img> attribute, but the exact
    # string is pinned by render()'s doctests, so it is preserved as-is.
    media = (
        (('.jpg', '.jpeg', '.gif', '.png'), '<img src="%s" controls />'),
        (('.mp4', '.mpeg', '.mov', '.ogv'), '<video src="%s" controls></video>'),
        (('.mp3', '.wav', '.ogg'), '<audio src="%s" controls></audio>'),
    )
    for suffixes, template in media:
        if lower.endswith(suffixes):
            return template % url
    # Anything else becomes a plain anchor.
    return '<a href="%s">%s</a>' % (url, url)
def protolinks_simple(proto, url):
    """
    it converts url to html-string using appropriate proto-prefix:
    Uses for construction "proto:url", e.g.:
    "iframe:http://www.example.com/path" will call protolinks()
    with parameters:
    proto="iframe"
    url="http://www.example.com/path"
    Unknown protos are returned unchanged as "proto:url".
    """
    # 'embed' is treated as a synonym of 'iframe'.
    if proto in ('iframe', 'embed'):
        return '<iframe src="%s" frameborder="0" allowfullscreen></iframe>' % url
    # QR codes are rendered through the Google Charts API.
    if proto == 'qr':
        return '<img style="width:100px" src="http://chart.apis.google.com/chart?cht=qr&chs=100x100&chl=%s&choe=UTF-8&chld=H" alt="QR Code" title="QR Code" />' % url
    return proto + ':' + url
def email_simple(email):
    """Wrap an e-mail address in a mailto anchor."""
    return '<a href="mailto:{0}">{0}</a>'.format(email)
def render(text,
extra={},
allowed={},
sep='p',
URL=None,
environment=None,
latex='google',
autolinks='default',
protolinks='default',
class_prefix='',
id_prefix='markmin_',
pretty_print=False):
"""
Arguments:
- text is the text to be processed
- extra is a dict like extra=dict(custom=lambda value: value) that process custom code
as in " ``this is custom code``:custom "
- allowed is a dictionary of list of allowed classes like
allowed = dict(code=('python','cpp','java'))
- sep can be 'p' to separate text in <p>...</p>
or can be 'br' to separate text using <br />
- URL -
- environment is a dictionary of environment variables (can be accessed with @{variable}
- latex -
- autolinks is a function to convert auto urls to html-code (default is autolinks(url) )
- protolinks is a function to convert proto-urls (e.g."proto:url") to html-code
(default is protolinks(proto,url))
- class_prefix is a prefix for ALL classes in markmin text. E.g. if class_prefix='my_'
then for ``test``:cls class will be changed to "my_cls" (default value is '')
- id_prefix is prefix for ALL ids in markmin text (default value is 'markmin_'). E.g.:
-- [[id]] will be converted to <span class="anchor" id="markmin_id"></span>
-- [[link #id]] will be converted to <a href="#markmin_id">link</a>
-- ``test``:cls[id] will be converted to <code class="cls" id="markmin_id">test</code>
>>> render('this is\\n# a section\\n\\nparagraph')
'<p>this is</p><h1>a section</h1><p>paragraph</p>'
>>> render('this is\\n## a subsection\\n\\nparagraph')
'<p>this is</p><h2>a subsection</h2><p>paragraph</p>'
>>> render('this is\\n### a subsubsection\\n\\nparagraph')
'<p>this is</p><h3>a subsubsection</h3><p>paragraph</p>'
>>> render('**hello world**')
'<p><strong>hello world</strong></p>'
>>> render('``hello world``')
'<code>hello world</code>'
>>> render('``hello world``:python')
'<code class="python">hello world</code>'
>>> render('``\\nhello\\nworld\\n``:python')
'<pre><code class="python">hello\\nworld</code></pre>'
>>> render('``hello world``:python[test_id]')
'<code class="python" id="markmin_test_id">hello world</code>'
>>> render('``hello world``:id[test_id]')
'<code id="markmin_test_id">hello world</code>'
>>> render('``\\nhello\\nworld\\n``:python[test_id]')
'<pre><code class="python" id="markmin_test_id">hello\\nworld</code></pre>'
>>> render('``\\nhello\\nworld\\n``:id[test_id]')
'<pre><code id="markmin_test_id">hello\\nworld</code></pre>'
>>> render("''hello world''")
'<p><em>hello world</em></p>'
>>> render('** hello** **world**')
'<p>** hello** <strong>world</strong></p>'
>>> render('- this\\n- is\\n- a list\\n\\nand this\\n- is\\n- another')
'<ul><li>this</li><li>is</li><li>a list</li></ul><p>and this</p><ul><li>is</li><li>another</li></ul>'
>>> render('+ this\\n+ is\\n+ a list\\n\\nand this\\n+ is\\n+ another')
'<ol><li>this</li><li>is</li><li>a list</li></ol><p>and this</p><ol><li>is</li><li>another</li></ol>'
>>> render("----\\na | b\\nc | d\\n----\\n")
'<table><tbody><tr class="first"><td>a</td><td>b</td></tr><tr class="even"><td>c</td><td>d</td></tr></tbody></table>'
>>> render("----\\nhello world\\n----\\n")
'<blockquote>hello world</blockquote>'
>>> render('[[myanchor]]')
'<p><span class="anchor" id="markmin_myanchor"></span></p>'
>>> render('[[ http://example.com]]')
'<p><a href="http://example.com">http://example.com</a></p>'
>>> render('[[bookmark [http://example.com] ]]')
'<p><span class="anchor" id="markmin_bookmark"><a href="http://example.com">http://example.com</a></span></p>'
>>> render('[[this is a link http://example.com]]')
'<p><a href="http://example.com">this is a link</a></p>'
>>> render('[[this is an image http://example.com left]]')
'<p><img src="http://example.com" alt="this is an image" style="float:left" /></p>'
>>> render('[[this is an image http://example.com left 200px]]')
'<p><img src="http://example.com" alt="this is an image" style="float:left;width:200px" /></p>'
>>> render("[[Your browser doesn't support <video> HTML5 tag http://example.com video]]")
'<p><video controls="controls"><source src="http://example.com" />Your browser doesn\\'t support <video> HTML5 tag</video></p>'
>>> render("[[Your browser doesn't support <audio> HTML5 tag http://example.com audio]]")
'<p><audio controls="controls"><source src="http://example.com" />Your browser doesn\\'t support <audio> HTML5 tag</audio></p>'
>>> render("[[Your\\nbrowser\\ndoesn't\\nsupport\\n<audio> HTML5 tag http://exam\\\\\\nple.com\\naudio]]")
'<p><audio controls="controls"><source src="http://example.com" />Your browser doesn\\'t support <audio> HTML5 tag</audio></p>'
>>> render('[[this is a **link** http://example.com]]')
'<p><a href="http://example.com">this is a <strong>link</strong></a></p>'
>>> render("``aaa``:custom", extra=dict(custom=lambda text: 'x'+text+'x'))
'xaaax'
>>> print render(r"$$\int_a^b sin(x)dx$$")
<img src="http://chart.apis.google.com/chart?cht=tx&chl=%5Cint_a%5Eb%20sin%28x%29dx" />
>>> markmin2html(r"use backslash: \[\[[[mess\[[ag\]]e link]]\]]")
'<p>use backslash: [[<a href="link">mess[[ag]]e</a>]]</p>'
>>> markmin2html("backslash instead of exclamation sign: \``probe``")
'<p>backslash instead of exclamation sign: ``probe``</p>'
>>> render(r"simple image: [[\[[this is an image\]] http://example.com IMG]]!!!")
'<p>simple image: <img src="http://example.com" alt="[[this is an image]]" />!!!</p>'
>>> render(r"simple link no anchor with popup: [[ http://example.com popup]]")
'<p>simple link no anchor with popup: <a href="http://example.com" target="_blank">http://example.com</a></p>'
>>> render("auto-url: http://example.com")
'<p>auto-url: <a href="http://example.com">http://example.com</a></p>'
>>> render("auto-image: (http://example.com/image.jpeg)")
'<p>auto-image: (<img src="http://example.com/image.jpeg" controls />)</p>'
>>> render("qr: (qr:http://example.com/image.jpeg)")
'<p>qr: (<img style="width:100px" src="http://chart.apis.google.com/chart?cht=qr&chs=100x100&chl=http://example.com/image.jpeg&choe=UTF-8&chld=H" alt="QR Code" title="QR Code" />)</p>'
>>> render("embed: (embed:http://example.com/page)")
'<p>embed: (<iframe src="http://example.com/page" frameborder="0" allowfullscreen></iframe>)</p>'
>>> render("iframe: (iframe:http://example.com/page)")
'<p>iframe: (<iframe src="http://example.com/page" frameborder="0" allowfullscreen></iframe>)</p>'
>>> render("title1: [[test message [simple \[test\] title] http://example.com ]] test")
'<p>title1: <a href="http://example.com" title="simple [test] title">test message</a> test</p>'
>>> render("title2: \[\[[[test message [simple title] http://example.com popup]]\]]")
'<p>title2: [[<a href="http://example.com" title="simple title" target="_blank">test message</a>]]</p>'
>>> render("title3: [[ [link w/o anchor but with title] http://www.example.com ]]")
'<p>title3: <a href="http://www.example.com" title="link w/o anchor but with title">http://www.example.com</a></p>'
>>> render("title4: [[ [simple title] http://www.example.com popup]]")
'<p>title4: <a href="http://www.example.com" title="simple title" target="_blank">http://www.example.com</a></p>'
>>> render("title5: [[test message [simple title] http://example.com IMG]]")
'<p>title5: <img src="http://example.com" alt="test message" title="simple title" /></p>'
>>> render("title6: [[[test message w/o title] http://example.com IMG]]")
'<p>title6: <img src="http://example.com" alt="[test message w/o title]" /></p>'
>>> render("title7: [[[this is not a title] [this is a title] http://example.com IMG]]")
'<p>title7: <img src="http://example.com" alt="[this is not a title]" title="this is a title" /></p>'
>>> render("title8: [[test message [title] http://example.com center]]")
'<p>title8: <p style="text-align:center"><img src="http://example.com" alt="test message" title="title" /></p></p>'
>>> render("title9: [[test message [title] http://example.com left]]")
'<p>title9: <img src="http://example.com" alt="test message" title="title" style="float:left" /></p>'
>>> render("title10: [[test message [title] http://example.com right 100px]]")
'<p>title10: <img src="http://example.com" alt="test message" title="title" style="float:right;width:100px" /></p>'
>>> render("title11: [[test message [title] http://example.com center 200px]]")
'<p>title11: <p style="text-align:center"><img src="http://example.com" alt="test message" title="title" style="width:200px" /></p></p>'
>>> render(r"\\[[probe]]")
'<p>[[probe]]</p>'
>>> render(r"\\\\[[probe]]")
'<p>\\\\<span class="anchor" id="markmin_probe"></span></p>'
>>> render(r"\\\\\\[[probe]]")
'<p>\\\\[[probe]]</p>'
>>> render(r"\\\\\\\\[[probe]]")
'<p>\\\\\\\\<span class="anchor" id="markmin_probe"></span></p>'
>>> render(r"\\\\\\\\\[[probe]]")
'<p>\\\\\\\\[[probe]]</p>'
>>> render(r"\\\\\\\\\\\[[probe]]")
'<p>\\\\\\\\\\\\<span class="anchor" id="markmin_probe"></span></p>'
>>> render("``[[ [\\[[probe\]\\]] URL\\[x\\]]]``:red[dummy_params]")
'<span style="color: red"><a href="URL[x]" title="[[probe]]">URL[x]</a></span>'
>>> render("the \\**text**")
'<p>the **text**</p>'
>>> render("the \\``text``")
'<p>the ``text``</p>'
>>> render("the \\\\''text''")
"<p>the ''text''</p>"
>>> render("the [[link [**with** ``<b>title</b>``:red] http://www.example.com]]")
'<p>the <a href="http://www.example.com" title="**with** ``<b>title</b>``:red">link</a></p>'
>>> render("the [[link \\[**without** ``<b>title</b>``:red\\] http://www.example.com]]")
'<p>the <a href="http://www.example.com">link [<strong>without</strong> <span style="color: red"><b>title</b></span>]</a></p>'
>>> render("aaa-META-``code``:text[]-LINK-[[link http://www.example.com]]-LINK-[[image http://www.picture.com img]]-end")
'<p>aaa-META-<code class="text">code</code>-LINK-<a href="http://www.example.com">link</a>-LINK-<img src="http://www.picture.com" alt="image" />-end</p>'
>>> render("[[<a>test</a> [<a>test2</a>] <a>text3</a>]]")
'<p><a href="<a>text3</a>" title="<a>test2</a>"><a>test</a></a></p>'
>>> render("[[<a>test</a> [<a>test2</a>] <a>text3</a> IMG]]")
'<p><img src="<a>text3</a>" alt="<a>test</a>" title="<a>test2</a>" /></p>'
>>> render("**bold** ''italic'' ~~strikeout~~")
'<p><strong>bold</strong> <em>italic</em> <del>strikeout</del></p>'
>>> render("this is ``a red on yellow text``:c[#FF0000:#FFFF00]")
'<p>this is <span style="color: #FF0000;background-color: #FFFF00;">a red on yellow text</span></p>'
>>> render("this is ``a text with yellow background``:c[:yellow]")
'<p>this is <span style="background-color: yellow;">a text with yellow background</span></p>'
>>> render("this is ``a colored text (RoyalBlue)``:color[rgb(65,105,225)]")
'<p>this is <span style="color: rgb(65,105,225);">a colored text (RoyalBlue)</span></p>'
>>> render("this is ``a green text``:color[green:]")
'<p>this is <span style="color: green;">a green text</span></p>'
>>> render("**@{probe:1}**", environment=dict(probe=lambda t:"test %s" % t))
'<p><strong>test 1</strong></p>'
>>> render("**@{probe:t=a}**", environment=dict(probe=lambda t:"test %s" % t, a=1))
'<p><strong>test 1</strong></p>'
>>> render('[[id1 [span **messag** in ''markmin''] ]] ... [[**link** to id [link\\\'s title] #mark1]]')
'<p><span class="anchor" id="markmin_id1">span <strong>messag</strong> in markmin</span> ... <a href="#markmin_mark1" title="link\\\'s title"><strong>link</strong> to id</a></p>'
>>> render('# Multiline[[NEWLINE]]\\n title\\nParagraph[[NEWLINE]]\\nwith breaks[[NEWLINE]]\\nin it')
'<h1>Multiline<br /> title</h1><p>Paragraph<br /> with breaks<br /> in it</p>'
>>> render("anchor with name 'NEWLINE': [[NEWLINE [ ] ]]")
'<p>anchor with name \\'NEWLINE\\': <span class="anchor" id="markmin_NEWLINE"></span></p>'
>>> render("anchor with name 'NEWLINE': [[NEWLINE [newline] ]]")
'<p>anchor with name \\'NEWLINE\\': <span class="anchor" id="markmin_NEWLINE">newline</span></p>'
"""
if autolinks == "default":
autolinks = autolinks_simple
if protolinks == "default":
protolinks = protolinks_simple
pp = '\n' if pretty_print else ''
if isinstance(text, unicode):
text = text.encode('utf8')
text = str(text or '')
text = regex_backslash.sub(lambda m: m.group(1).translate(ttab_in), text)
text = text.replace('\x05', '').replace('\r\n', '\n') # concatenate strings separeted by \\n
if URL is not None:
text = replace_at_urls(text, URL)
if latex == 'google':
text = regex_dd.sub('``\g<latex>``:latex ', text)
#############################################################
# replace all blocks marked with ``...``:class[id] with META
# store them into segments they will be treated as code
#############################################################
segments = []
    def mark_code(m):
        """regex_code callback: stash each ``...``:class[id] block in
        `segments` and replace it with the META placeholder so later
        passes cannot touch code contents."""
        g = m.group(0)
        if g in (META, DISABLED_META):
            # A literal META char in the input: record it so expand_meta
            # re-emits it escaped rather than expanding a code segment.
            segments.append((None, None, None, g))
            return m.group()
        elif g == '````':
            # Empty code block collapses to nothing.
            segments.append((None, None, None, ''))
            return m.group()
        else:
            c = m.group('c') or ''  # class name after the closing backticks
            p = m.group('p') or ''  # id in [brackets]
            if 'code' in allowed and c not in allowed['code']:
                c = ''  # class not whitelisted -> drop it
            code = m.group('t').replace('!`!', '`')  # unescape backticks
            segments.append((code, c, p, m.group(0)))
            return META
text = regex_code.sub(mark_code, text)
#############################################################
# replace all blocks marked with [[...]] with LINK
# store them into links they will be treated as link
#############################################################
links = []
    def mark_link(m):
        """regex_link callback: stash each [[...]] body in `links`
        (None for a literal LINK placeholder) and substitute LINK."""
        links.append(None if m.group() == LINK
                     else m.group('s'))
        return LINK
text = regex_link.sub(mark_link, text)
text = escape(text)
if protolinks:
text = regex_proto.sub(lambda m: protolinks(*m.group('p', 'k')), text)
if autolinks:
text = replace_autolinks(text, autolinks)
#############################################################
# normalize spaces
#############################################################
strings = text.split('\n')
    def parse_title(t, s):  # out, lev, etags, tag, s):
        """Emit an <hN> opening for a '#'*N header line `s`; closing any
        open lists/paragraphs first.  Mutates the shared out/etags/ltags/
        tlev state; returns the new (lev, mtag)."""
        hlevel = str(len(t))  # number of '#' marks == heading level
        out.extend(etags[::-1])  # close everything still open
        out.append("<h%s>%s" % (hlevel, s))
        etags[:] = ["</h%s>%s" % (hlevel, pp)]
        lev = 0
        ltags[:] = []
        tlev[:] = []
        return (lev, 'h')
    def parse_list(t, p, s, tag, lev, mtag, lineno):
        """Handle one list-item line.

        t    -- the run of '+' or '-' marks (its length is the item level)
        p    -- '.' when the item opens a paragraph ("++." / "--.")
        s    -- remainder of the line
        tag  -- 'ol' or 'ul'
        Mutates the shared out/etags/ltags/tlev lists; returns the new
        (lev, mtag, lineno).
        """
        lent = len(t)
        if lent < lev:  # current item level < previous item level
            while ltags[-1] > lent:
                ltags.pop()
                out.append(etags.pop())
            lev = lent
            tlev[lev:] = []
        if lent > lev:  # current item level > previous item level
            if lev == 0:  # previous line is not a list (paragraph or title)
                out.extend(etags[::-1])
                ltags[:] = []
                tlev[:] = []
                etags[:] = []
            if pend and mtag == '.':  # paragraph in a list:
                out.append(etags.pop())
                ltags.pop()
            for i in xrange(lent - lev):
                # open one nested list per missing level
                out.append('<' + tag + '>' + pp)
                etags.append('</' + tag + '>' + pp)
                lev += 1
                ltags.append(lev)
                tlev.append(tag)
        elif lent == lev:
            if tlev[-1] != tag:
                # type of list is changed (ul<=>ol):
                for i in xrange(ltags.count(lent)):
                    ltags.pop()
                    out.append(etags.pop())
                tlev[-1] = tag
                out.append('<' + tag + '>' + pp)
                etags.append('</' + tag + '>' + pp)
                ltags.append(lev)
            else:
                if ltags.count(lev) > 1:
                    # close previous <li> (and its paragraph, if any)
                    out.append(etags.pop())
                    ltags.pop()
        mtag = 'l'
        out.append('<li>')
        etags.append('</li>' + pp)
        ltags.append(lev)
        if s[:1] == '-':
            # item content may itself start a table/blockquote/hr
            (s, mtag, lineno) = parse_table_or_blockquote(s, mtag, lineno)
        if p and mtag == 'l':
            # "++."/"--." opens a paragraph inside the item
            (lev, mtag, lineno) = parse_point(t, s, lev, '', lineno)
        else:
            out.append(s)
        return (lev, mtag, lineno)
    def parse_point(t, s, lev, mtag, lineno):
        """ paragraphs in lists """
        lent = len(t)  # level implied by the run of '.' marks
        if lent > lev:
            # deeper than current list -> treat as a new (unordered) item
            return parse_list(t, '.', s, 'ul', lev, mtag, lineno)
        elif lent < lev:
            # shallower -> close lists down to this level
            while ltags[-1] > lent:
                ltags.pop()
                out.append(etags.pop())
            lev = lent
            tlev[lev:] = []
            mtag = ''
        elif lent == lev:
            if pend and mtag == '.':
                # close the previous in-list paragraph
                out.append(etags.pop())
                ltags.pop()
            if br and mtag in ('l', '.'):
                out.append(br)
        if s == META:
            mtag = ''  # code block: no paragraph wrapper
        else:
            mtag = '.'
            if s[:1] == '-':
                (s, mtag, lineno) = parse_table_or_blockquote(s, mtag, lineno)
            if mtag == '.':
                out.append(pbeg)
                if pend:
                    etags.append(pend)
                    ltags.append(lev)
        out.append(s)
        return (lev, mtag, lineno)
    def parse_table_or_blockquote(s, mtag, lineno):
        """Called on a line of dashes ('----').  Consumes following lines
        and returns them rendered as a <table>, <blockquote> or <hr />.

        Returns (rendered_string, 'q', new_lineno).  NOTE(review): the
        mtag assignments ('t'/'q') inside are shadowed by the hard-coded
        'q' in the return -- presumably intentional upstream; confirm
        before relying on mtag here.
        """
        # check next line. If next line :
        # - is empty -> this is an <hr /> tag
        # - consists '|' -> table
        # - consists other characters -> blockquote
        if (lineno + 1 >= strings_len or
                not (s.count('-') == len(s) and len(s) > 3)):
            # not a pure '----' separator (or nothing follows): leave as-is
            return (s, mtag, lineno)
        lineno += 1
        s = strings[lineno].strip()
        if s:
            if '|' in s:
                # table
                tout = []   # rows accumulated for the current section
                thead = []
                tbody = []
                rownum = 0
                t_id = ''
                t_cls = ''
                # parse table:
                while lineno < strings_len:
                    s = strings[lineno].strip()
                    if s[:1] == '=':
                        # header or footer
                        if s.count('=') == len(s) and len(s) > 3:
                            if not thead:  # if thead list is empty:
                                thead = tout
                            else:
                                tbody.extend(tout)
                            tout = []
                            rownum = 0
                            lineno += 1
                            continue
                    m = regex_tq.match(s)
                    if m:
                        # closing '----:class[id]' line terminates the table
                        t_cls = m.group('c') or ''
                        t_id = m.group('p') or ''
                        break
                    if rownum % 2:
                        tr = '<tr class="even">'
                    else:
                        tr = '<tr class="first">' if rownum == 0 else '<tr>'
                    tout.append(tr + ''.join(['<td%s>%s</td>' % (
                        ' class="num"'
                        if regex_num.match(f) else '',
                        f.strip()
                    ) for f in s.split('|')]) + '</tr>' + pp)
                    rownum += 1
                    lineno += 1
                t_cls = ' class="%s%s"' % (class_prefix, t_cls) \
                    if t_cls and t_cls != 'id' else ''
                t_id = ' id="%s%s"' % (id_prefix, t_id) if t_id else ''
                s = ''
                if thead:
                    s += '<thead>' + pp + ''.join([l for l in thead]) + '</thead>' + pp
                if not tbody:  # tbody strings are in tout list
                    tbody = tout
                    tout = []
                if tbody:  # if tbody list is not empty:
                    s += '<tbody>' + pp + ''.join([l for l in tbody]) + '</tbody>' + pp
                if tout:  # tfoot is not empty:
                    s += '<tfoot>' + pp + ''.join([l for l in tout]) + '</tfoot>' + pp
                s = '<table%s%s>%s%s</table>%s' % (t_cls, t_id, pp, s, pp)
                mtag = 't'
            else:
                # parse blockquote:
                bq_begin = lineno
                t_mode = False  # embedded table
                t_cls = ''
                t_id = ''
                # search blockquote closing line:
                while lineno < strings_len:
                    s = strings[lineno].strip()
                    if not t_mode:
                        m = regex_tq.match(s)
                        if m:
                            if (lineno + 1 == strings_len or
                                    '|' not in strings[lineno + 1]):
                                # '----' not followed by a table row closes
                                # the blockquote
                                t_cls = m.group('c') or ''
                                t_id = m.group('p') or ''
                                break
                        if regex_bq_headline.match(s):
                            if (lineno + 1 < strings_len and
                                    strings[lineno + 1].strip()):
                                t_mode = True  # entering an embedded table
                            lineno += 1
                            continue
                    elif regex_tq.match(s):
                        t_mode = False  # embedded table ended
                        lineno += 1
                        continue
                    lineno += 1
                t_cls = ' class="%s%s"' % (class_prefix, t_cls) \
                    if t_cls and t_cls != 'id' else ''
                t_id = ' id="%s%s"' % (id_prefix, t_id) \
                    if t_id else ''
                s = '<blockquote%s%s>%s</blockquote>%s' \
                    % (t_cls,
                       t_id,
                       '\n'.join(strings[bq_begin:lineno]), pp)
                mtag = 'q'
        else:
            # next line empty: the dashes were a horizontal rule
            s = '<hr />'
            lineno -= 1
            mtag = 'q'
        return (s, 'q', lineno)
if sep == 'p':
pbeg = "<p>"
pend = "</p>" + pp
br = ''
else:
pbeg = pend = ''
br = "<br />" + pp if sep == 'br' else ''
lev = 0 # nesting level of lists
c0 = '' # first character of current line
out = [] # list of processed lines
etags = [] # trailing tags
ltags = [] # level# correspondent to trailing tag
tlev = [] # list of tags for each level ('ul' or 'ol')
mtag = '' # marked tag (~last tag) ('l','.','h','p','t'). Used to set <br/>
# and to avoid <p></p> around tables and blockquotes
lineno = 0
strings_len = len(strings)
while lineno < strings_len:
s0 = strings[lineno][:1]
s = strings[lineno].strip()
""" # + - . ---------------------
## ++ -- .. ------- field | field | field <-title
### +++ --- ... quote =====================
#### ++++ ---- .... ------- field | field | field <-body
##### +++++ ----- ..... ---------------------:class[id]
"""
pc0 = c0 # first character of previous line
c0 = s[:1]
if c0: # for non empty strings
if c0 in "#+-.": # first character is one of: # + - .
(t1, t2, p, ss) = regex_list.findall(s)[0]
# t1 - tag ("###")
# t2 - tag ("+++", "---", "...")
# p - paragraph point ('.')->for "++." or "--."
# ss - other part of string
if t1 or t2:
# headers and lists:
if c0 == '#': # headers
(lev, mtag) = parse_title(t1, ss)
lineno += 1
continue
elif c0 == '+': # ordered list
(lev, mtag, lineno) = parse_list(t2, p, ss, 'ol', lev, mtag, lineno)
lineno += 1
continue
elif c0 == '-': # unordered list, table or blockquote
if p or ss:
(lev, mtag, lineno) = parse_list(t2, p, ss, 'ul', lev, mtag, lineno)
lineno += 1
continue
else:
(s, mtag, lineno) = parse_table_or_blockquote(s, mtag, lineno)
elif lev > 0: # and c0 == '.' # paragraph in lists
(lev, mtag, lineno) = parse_point(t2, ss, lev, mtag, lineno)
lineno += 1
continue
if lev == 0 and (mtag == 'q' or s == META):
# new paragraph
pc0 = ''
if pc0 == '' or (mtag != 'p' and s0 not in (' ', '\t')):
# paragraph
out.extend(etags[::-1])
etags = []
ltags = []
tlev = []
lev = 0
if br and mtag == 'p':
out.append(br)
if mtag != 'q' and s != META:
if pend:
etags = [pend]
out.append(pbeg)
mtag = 'p'
else:
mtag = ''
out.append(s)
else:
if lev > 0 and mtag == '.' and s == META:
out.append(etags.pop())
ltags.pop()
out.append(s)
mtag = ''
else:
out.append(' ' + s)
lineno += 1
out.extend(etags[::-1])
text = ''.join(out)
#############################################################
# do strong,em,del
#############################################################
text = regex_strong.sub('<strong>\g<t></strong>', text)
text = regex_del.sub('<del>\g<t></del>', text)
text = regex_em.sub('<em>\g<t></em>', text)
#############################################################
# deal with images, videos, audios and links
#############################################################
    def sub_media(m):
        """Render one [[... url IMG/center/left/right/video/audio ...]]
        body as an <img>, <video> or <audio> tag; returns the original
        text untouched when there is no URL (so sub_link can try)."""
        t, a, k, p, w = m.group('t', 'a', 'k', 'p', 'w')
        if not k:
            return m.group(0)
        k = escape(k)
        t = t or ''
        style = 'width:%s' % w if w else ''
        # META inside the title must stay inert through expand_meta
        title = ' title="%s"' % escape(a).replace(META, DISABLED_META) if a else ''
        p_begin = p_end = ''
        if p == 'center':
            p_begin = '<p style="text-align:center">'
            p_end = '</p>' + pp
        elif p == 'blockleft':
            p_begin = '<p style="text-align:left">'
            p_end = '</p>' + pp
        elif p == 'blockright':
            p_begin = '<p style="text-align:right">'
            p_end = '</p>' + pp
        elif p in ('left', 'right'):
            style = ('float:%s' % p) + (';%s' % style if style else '')
        if t and regex_auto.match(t):
            # alt text that is itself a URL becomes a wrapping link
            p_begin = p_begin + '<a href="%s">' % t
            p_end = '</a>' + p_end
            t = ''
        if style:
            style = ' style="%s"' % style
        if p in ('video', 'audio'):
            # caption may contain markmin markup; render it recursively
            t = render(t, {}, {}, 'br', URL, environment, latex,
                       autolinks, protolinks, class_prefix, id_prefix, pretty_print)
            return '<%(p)s controls="controls"%(title)s%(style)s><source src="%(k)s" />%(t)s</%(p)s>' \
                % dict(p=p, title=title, style=style, k=k, t=t)
        alt = ' alt="%s"' % escape(t).replace(META, DISABLED_META) if t else ''
        return '%(begin)s<img src="%(k)s"%(alt)s%(title)s%(style)s />%(end)s' \
            % dict(begin=p_begin, k=k, alt=alt, title=title, style=style, end=p_end)
    def sub_link(m):
        """Render one [[anchor-or-link]] body: an <a href> when a URL is
        present, <br /> for the NEWLINE pseudo-anchor, otherwise a named
        <span class="anchor">."""
        t, a, k, p = m.group('t', 'a', 'k', 'p')
        if not k and not t:
            return m.group(0)  # nothing usable: signal failure to caller
        t = t or ''
        a = escape(a) if a else ''
        if k:
            if '#' in k and ':' not in k.split('#')[0]:
                # wikipage, not external url
                k = k.replace('#', '#' + id_prefix)
            k = escape(k)
            title = ' title="%s"' % a.replace(META, DISABLED_META) if a else ''
            target = ' target="_blank"' if p == 'popup' else ''
            # anchor text may contain markmin; render without auto/proto links
            t = render(t, {}, {}, 'br', URL, environment, latex, None,
                       None, class_prefix, id_prefix, pretty_print) if t else k
            return '<a href="%(k)s"%(title)s%(target)s>%(t)s</a>' \
                % dict(k=k, title=title, target=target, t=t)
        if t == 'NEWLINE' and not a:
            return '<br />' + pp
        return '<span class="anchor" id="%s">%s</span>' % (
            escape(id_prefix + t),
            render(a, {}, {}, 'br', URL,
                   environment, latex, autolinks,
                   protolinks, class_prefix,
                   id_prefix, pretty_print))
parts = text.split(LINK)
text = parts[0]
for i, s in enumerate(links):
if s is None:
html = LINK
else:
html = regex_media_level2.sub(sub_media, s)
if html == s:
html = regex_link_level2.sub(sub_link, html)
if html == s:
# return unprocessed string as a signal of an error
html = '[[%s]]' % s
text += html + parts[i + 1]
#############################################################
# process all code text
#############################################################
    def expand_meta(m):
        """Replace each META placeholder with its stored code segment,
        dispatching on the segment's class: a user `extra` handler,
        'cite', 'latex', an HTML color name, 'c'/'color', or a plain
        (possibly <pre>) <code> block.  Consumes `segments` in order."""
        code, b, p, s = segments.pop(0)
        if code is None or m.group() == DISABLED_META:
            return escape(s)  # literal META char, not a real segment
        if b in extra:
            # user-supplied renderer for this class; strip one wrapping
            # newline on each side first
            if code[:1] == '\n':
                code = code[1:]
            if code[-1:] == '\n':
                code = code[:-1]
            if p:
                return str(extra[b](code, p))
            else:
                return str(extra[b](code))
        elif b == 'cite':
            return '[' + ','.join('<a href="#%s" class="%s">%s</a>' %
                                  (id_prefix + d, b, d)
                                  for d in escape(code).split(',')) + ']'
        elif b == 'latex':
            return LATEX % urllib.quote(code)
        elif b in html_colors:
            return '<span style="color: %s">%s</span>' \
                % (b, render(code, {}, {}, 'br', URL, environment, latex,
                             autolinks, protolinks, class_prefix, id_prefix, pretty_print))
        elif b in ('c', 'color') and p:
            # param is "fg:bg"; either side may be empty
            c = p.split(':')
            fg = 'color: %s;' % c[0] if c[0] else ''
            bg = 'background-color: %s;' % c[1] if len(c) > 1 and c[1] else ''
            return '<span style="%s%s">%s</span>' \
                % (fg, bg, render(code, {}, {}, 'br', URL, environment, latex,
                                  autolinks, protolinks, class_prefix, id_prefix, pretty_print))
        cls = ' class="%s%s"' % (class_prefix, b) if b and b != 'id' else ''
        id = ' id="%s%s"' % (id_prefix, escape(p)) if p else ''
        # leading/trailing newline on both sides selects block (<pre>) form
        beg = (code[:1] == '\n')
        end = [None, -1][code[-1:] == '\n']
        if beg and end:
            return '<pre><code%s%s>%s</code></pre>%s' % (cls, id, escape(code[1:-1]), pp)
        return '<code%s%s>%s</code>' % (cls, id, escape(code[beg:end]))
text = regex_expand_meta.sub(expand_meta, text)
if environment:
text = replace_components(text, environment)
return text.translate(ttab_out)
def markmin2html(text, extra={}, allowed={}, sep='p',
                 autolinks='default', protolinks='default',
                 class_prefix='', id_prefix='markmin_', pretty_print=False):
    """Convert markmin markup in *text* to an HTML string.

    Thin public wrapper that forwards every option to :func:`render`.
    """
    return render(
        text,
        extra,
        allowed,
        sep,
        autolinks=autolinks,
        protolinks=protolinks,
        class_prefix=class_prefix,
        id_prefix=id_prefix,
        pretty_print=pretty_print,
    )
def run_doctests():
    """Run the doctest examples embedded in this module's docstrings."""
    import doctest
    doctest.testmod()
if __name__ == '__main__':
    # Command-line driver (Python 2 syntax):
    #   -h             render this module's docstring as an HTML page
    #   -t             crude timing benchmark of markmin2html(__doc__)
    #   file.markmin [file.css | @path/to.css]
    #                  render a file, inlining or linking a stylesheet
    #   (no args)      print usage and run the doctest suite
    import sys
    import doctest
    from textwrap import dedent
    # Page template; [1:] strips the leading newline left by dedent.
    html = dedent("""
<!doctype html>
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
%(style)s
<title>%(title)s</title>
</head>
<body>
%(body)s
</body>
</html>""")[1:]
    if sys.argv[1:2] == ['-h']:
        # Demo stylesheet used when rendering the module docstring.
        style = dedent("""
<style>
blockquote { background-color: #FFFAAE; padding: 7px; }
table { border-collapse: collapse; }
thead td { border-bottom: 1px solid; }
tfoot td { border-top: 1px solid; }
.tableclass1 { background-color: lime; }
.tableclass1 thead { color: yellow; background-color: green; }
.tableclass1 tfoot { color: yellow; background-color: green; }
.tableclass1 .even td { background-color: #80FF7F; }
.tableclass1 .first td {border-top: 1px solid; }
td.num { text-align: right; }
pre { background-color: #E0E0E0; padding: 5px; }
</style>""")[1:]
        print html % dict(title="Markmin markup language",
                          style=style,
                          body=markmin2html(__doc__, pretty_print=True))
    elif sys.argv[1:2] == ['-t']:
        from timeit import Timer
        loops = 1000
        ts = Timer("markmin2html(__doc__)", "from markmin2html import markmin2html")
        print 'timeit "markmin2html(__doc__)":'
        t = min([ts.timeit(loops) for i in range(3)])
        print "%s loops, best of 3: %.3f ms per loop" % (loops, t / 1000 * loops)
    elif len(sys.argv) > 1:
        fargv = open(sys.argv[1], 'r')
        try:
            markmin_text = fargv.read()
            # embed css file from second parameter into html file
            if len(sys.argv) > 2:
                if sys.argv[2].startswith('@'):
                    # '@path' links the stylesheet instead of inlining it
                    markmin_style = '<link rel="stylesheet" href="' + sys.argv[2][1:] + '"/>'
                else:
                    fargv2 = open(sys.argv[2], 'r')
                    try:
                        markmin_style = "<style>\n" + fargv2.read() + "</style>"
                    finally:
                        fargv2.close()
            else:
                markmin_style = ""
            print html % dict(title=sys.argv[1], style=markmin_style,
                              body=markmin2html(markmin_text, pretty_print=True))
        finally:
            fargv.close()
    else:
        print "Usage: " + sys.argv[0] + " -h | -t | file.markmin [file.css|@path_to/css]"
        print "where: -h - print __doc__"
        print " -t - timeit __doc__ (for testing purpuse only)"
        print " file.markmin [file.css] - process file.markmin + built in file.css (optional)"
        print " file.markmin [@path_to/css] - process file.markmin + link path_to/css (optional)"
        run_doctests()
| mit |
mcflugen/topoflow | topoflow/gui/ZZ_Old_Stuff/plot_window.py | 6 | 1324 |
# April 24, 2009
# S.D. Peckham
import wx
#----------------------------------------------------------------
class Plot_Window(wx.Window):
    """Double-buffered wx window for plotting a vector of labeled values.

    Fixes applied: the four ``def`` lines were missing their trailing
    colon (a SyntaxError), and ``On_Resize`` called ``self.InitBuffer()``
    which does not exist -- the method is named ``Init_Buffer``.

    NOTE(review): ``Init_Buffer`` calls ``self.Draw_Graph(dc)``, which is
    not defined in this class as seen here -- presumably implemented in a
    subclass or elsewhere; confirm before use.
    """

    def __init__(self, parent, title, labels):
        wx.Window.__init__(self, parent)
        self.title = title
        self.labels = labels
        self.data = [0.0] * len(labels)  # one value per label
        self.Init_Buffer()
        self.Bind(wx.EVT_SIZE, self.On_Resize)
        self.Bind(wx.EVT_PAINT, self.On_Paint)
    # __init__()

    #------------------------------------------------------------
    def On_Resize(self, event):
        # BUGFIX: was self.InitBuffer() (nonexistent); also added the
        # missing ':' on the def line.
        self.Init_Buffer()  # (need a new buffer)
    # On_Resize()

    #------------------------------------------------------------
    def On_Paint(self, event):
        # Blit the off-screen buffer; drawing happens in Init_Buffer.
        dc = wx.BufferedPaintDC(self, self.buffer)
    # On_Paint()

    #------------------------------------------------------------
    def Init_Buffer(self):
        w, h = self.GetClientSize()
        self.buffer = wx.EmptyBitmap(w, h)
        dc = wx.BufferedDC(wx.ClientDC(self), self.buffer)
        self.Draw_Graph(dc)
    # Init_Buffer()

    #------------------------------------------------------------
    def Get_Data(self):
        return self.data
    # Get_Data()
| mit |
damirda/ansible-modules-core | cloud/amazon/ec2_vpc_net.py | 21 | 10137 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_vpc_net
short_description: Configure AWS virtual private clouds
description:
- Create or terminate AWS virtual private clouds. This module has a dependency on python-boto.
version_added: "2.0"
author: Jonathan Davila (@defionscode)
options:
name:
description:
- The name to give your VPC. This is used in combination with the cidr_block paramater to determine if a VPC already exists.
required: yes
cidr_block:
description:
- The CIDR of the VPC
required: yes
tenancy:
description:
- Whether to be default or dedicated tenancy. This cannot be changed after the VPC has been created.
required: false
default: default
choices: [ 'default', 'dedicated' ]
dns_support:
description:
- Whether to enable AWS DNS support.
required: false
default: yes
choices: [ 'yes', 'no' ]
dns_hostnames:
description:
- Whether to enable AWS hostname support.
required: false
default: yes
choices: [ 'yes', 'no' ]
dhcp_opts_id:
description:
- the id of the DHCP options to use for this vpc
default: null
required: false
tags:
description:
- The tags you want attached to the VPC. This is independent of the name value, note if you pass a 'Name' key it would override the Name of the VPC if it's different.
default: None
required: false
aliases: [ 'resource_tags' ]
state:
description:
- The state of the VPC. Either absent or present.
default: present
required: false
choices: [ 'present', 'absent' ]
multi_ok:
description:
- By default the module will not create another VPC if there is another VPC with the same name and CIDR block. Specify this as true if you want duplicate VPCs created.
default: false
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create a VPC with dedicate tenancy and a couple of tags
- ec2_vpc_net:
name: Module_dev2
cidr_block: 10.10.0.0/16
region: us-east-1
tags:
module: ec2_vpc_net
this: works
tenancy: dedicated
'''
try:
import boto
import boto.ec2
import boto.vpc
from boto.exception import BotoServerError
HAS_BOTO=True
except ImportError:
HAS_BOTO=False
def boto_exception(err):
    """Return a human-readable message for a boto error object.

    Prefers ``error_message``, then ``message``, then falls back to a
    generic ``"<Exception class>: <err>"`` rendering.
    """
    for attr in ('error_message', 'message'):
        if hasattr(err, attr):
            return getattr(err, attr)
    return '%s: %s' % (Exception, err)
def vpc_exists(module, vpc, name, cidr_block, multi):
    """Returns True or False in regards to the existence of a VPC. When supplied
    with a CIDR, it will check for matching tags to determine if it is a match
    otherwise it will assume the VPC does not exist and thus return false.

    Returns the single VPC whose Name tag and CIDR block match, or None.
    With several matches: fails the module unless multi_ok was requested.
    """
    try:
        matching_vpcs = vpc.get_all_vpcs(filters={'tag:Name': name, 'cidr-block': cidr_block})
    except Exception as e:
        module.fail_json(msg=boto_exception(e))

    if len(matching_vpcs) == 1:
        return matching_vpcs[0]
    elif len(matching_vpcs) > 1:
        # BUGFIX: condition was inverted ("if multi:"), which made the
        # module fail exactly when the user opted in via multi_ok=true.
        if not multi:
            module.fail_json(msg='Currently there are %d VPCs that have the same name and '
                                 'CIDR block you specified. If you would like to create '
                                 'the VPC anyway please pass True to the multi_ok param.' % len(matching_vpcs))
    return None
def update_vpc_tags(vpc, module, vpc_obj, tags, name):
    """Ensure the VPC's tags equal *tags* plus a Name tag.

    Returns True when the tags differ (and applies them unless running in
    check mode), False when nothing would change.  Note: mutates the
    caller's *tags* dict by inserting the Name key, as before.
    """
    if tags is None:
        tags = dict()

    tags.update({'Name': name})
    try:
        current_tags = dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_obj.id}))
        # BUGFIX: was "if cmp(tags, current_tags):" -- cmp() is a
        # Python-2-only builtin; plain inequality is equivalent here.
        if tags != current_tags:
            if not module.check_mode:
                vpc.create_tags(vpc_obj.id, tags)
            return True
        else:
            return False
    except Exception as e:
        module.fail_json(msg=boto_exception(e))
def update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
    """Associate *dhcp_id* with the VPC when it differs from the current
    association; return True on (would-be) change, False otherwise."""
    if vpc_obj.dhcp_options_id == dhcp_id:
        return False
    if not module.check_mode:
        connection.associate_dhcp_options(dhcp_id, vpc_obj.id)
    return True
def get_vpc_values(vpc_obj):
    """Return the VPC object's attribute dict with non-serializable
    bookkeeping entries removed, or None for a None input.

    Note: strips the keys from the object's own ``__dict__`` in place,
    exactly as the original did.
    """
    if vpc_obj is None:
        return None
    vpc_values = vpc_obj.__dict__
    for key in ('region', 'item', 'connection'):
        vpc_values.pop(key, None)
    return vpc_values
def main():
    """Module entry point: create/update or delete a VPC per the params.

    Fixes applied: ``module.fail_json('In order ...')`` lacked the
    required ``msg=`` keyword (it would raise a TypeError instead of
    reporting the real problem), and the ``msg=e`` calls passed raw
    exception objects where text is expected -- now ``str(e)`` /
    ``boto_exception(e)``, consistent with the rest of the function.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        name=dict(type='str', default=None, required=True),
        cidr_block=dict(type='str', default=None, required=True),
        tenancy=dict(choices=['default', 'dedicated'], default='default'),
        dns_support=dict(type='bool', default=True),
        dns_hostnames=dict(type='bool', default=True),
        dhcp_opts_id=dict(type='str', default=None, required=False),
        tags=dict(type='dict', required=False, default=None, aliases=['resource_tags']),
        state=dict(choices=['present', 'absent'], default='present'),
        multi_ok=dict(type='bool', default=False),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    if not HAS_BOTO:
        module.fail_json(msg='boto is required for this module')

    name = module.params.get('name')
    cidr_block = module.params.get('cidr_block')
    tenancy = module.params.get('tenancy')
    dns_support = module.params.get('dns_support')
    dns_hostnames = module.params.get('dns_hostnames')
    dhcp_id = module.params.get('dhcp_opts_id')
    tags = module.params.get('tags')
    state = module.params.get('state')
    multi = module.params.get('multi_ok')

    changed = False

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")

    if dns_hostnames and not dns_support:
        # BUGFIX: fail_json requires the msg keyword argument.
        module.fail_json(msg='In order to enable DNS Hostnames you must also enable DNS support')

    if state == 'present':

        # Check if VPC exists
        vpc_obj = vpc_exists(module, connection, name, cidr_block, multi)

        if vpc_obj is None:
            try:
                changed = True
                if not module.check_mode:
                    vpc_obj = connection.create_vpc(cidr_block, instance_tenancy=tenancy)
                else:
                    module.exit_json(changed=changed)
            except BotoServerError as e:
                module.fail_json(msg=str(e))  # str(): fail_json expects text

        if dhcp_id is not None:
            try:
                if update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
                    changed = True
            except BotoServerError as e:
                module.fail_json(msg=str(e))

        if tags is not None or name is not None:
            try:
                if update_vpc_tags(connection, module, vpc_obj, tags, name):
                    changed = True
            except BotoServerError as e:
                module.fail_json(msg=str(e))

        # Note: Boto currently doesn't currently provide an interface to ec2-describe-vpc-attribute
        # which is needed in order to detect the current status of DNS options. For now we just update
        # the attribute each time and is not used as a changed-factor.
        try:
            if not module.check_mode:
                connection.modify_vpc_attribute(vpc_obj.id, enable_dns_support=dns_support)
                connection.modify_vpc_attribute(vpc_obj.id, enable_dns_hostnames=dns_hostnames)
        except BotoServerError as e:
            module.fail_json(msg=boto_exception(e))

        if not module.check_mode:
            # get the vpc obj again in case it has changed
            try:
                vpc_obj = connection.get_all_vpcs(vpc_obj.id)[0]
            except BotoServerError as e:
                module.fail_json(msg=boto_exception(e))

        module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj))

    elif state == 'absent':

        # Check if VPC exists
        vpc_obj = vpc_exists(module, connection, name, cidr_block, multi)

        if vpc_obj is not None:
            try:
                if not module.check_mode:
                    connection.delete_vpc(vpc_obj.id)
                vpc_obj = None
                changed = True
            except BotoServerError as e:
                module.fail_json(msg="%s. You may want to use the ec2_vpc_subnet, ec2_vpc_igw, "
                                     "and/or ec2_vpc_route_table modules to ensure the other components are absent." % boto_exception(e))

        module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj))
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
# Entry point when the module file is executed directly by Ansible.
if __name__ == '__main__':
    main()
| gpl-3.0 |
ryfeus/lambda-packs | Tensorflow_LightGBM_Scipy_nightly/source/setuptools/build_meta.py | 5 | 5995 | """A PEP 517 interface to setuptools
Previously, when a user or a command line tool (let's call it a "frontend")
needed to make a request of setuptools to take a certain action, for
example, generating a list of installation requirements, the frontend would
would call "setup.py egg_info" or "setup.py bdist_wheel" on the command line.
PEP 517 defines a different method of interfacing with setuptools. Rather
than calling "setup.py" directly, the frontend should:
1. Set the current directory to the directory with a setup.py file
2. Import this module into a safe python interpreter (one in which
setuptools can potentially set global variables or crash hard).
3. Call one of the functions defined in PEP 517.
What each function does is defined in PEP 517. However, here is a "casual"
definition of the functions (this definition should not be relied on for
bug reports or API stability):
- `build_wheel`: build a wheel in the folder and return the basename
- `get_requires_for_build_wheel`: get the `setup_requires` to build
- `prepare_metadata_for_build_wheel`: get the `install_requires`
- `build_sdist`: build an sdist in the folder and return the basename
- `get_requires_for_build_sdist`: get the `setup_requires` to build
Again, this is not a formal definition! Just a "taste" of the module.
"""
import os
import sys
import tokenize
import shutil
import contextlib
import setuptools
import distutils
class SetupRequirementsError(BaseException):
    """Raised (as control flow) to report unresolved ``setup_requires``.

    Derives from BaseException so it escapes ordinary ``except Exception``
    handlers inside a setup.py.
    """

    def __init__(self, specifiers):
        # Requirement specifier strings setuptools attempted to fetch.
        self.specifiers = specifiers
class Distribution(setuptools.dist.Distribution):
    """Distribution that reports setup_requires instead of fetching eggs."""

    def fetch_build_eggs(self, specifiers):
        # Surface the requirement list to the PEP 517 frontend via an
        # exception rather than downloading anything.
        raise SetupRequirementsError(specifiers)

    @classmethod
    @contextlib.contextmanager
    def patch(cls):
        """
        Replace
        distutils.core.Distribution with this class
        for the duration of this context.
        """
        orig = distutils.core.Distribution
        distutils.core.Distribution = cls
        try:
            yield
        finally:
            # Always restore the original, even if setup.py raised.
            distutils.core.Distribution = orig
def _to_str(s):
"""
Convert a filename to a string (on Python 2, explicitly
a byte string, not Unicode) as distutils checks for the
exact type str.
"""
if sys.version_info[0] == 2 and not isinstance(s, str):
# Assume it's Unicode, as that's what the PEP says
# should be provided.
return s.encode(sys.getfilesystemencoding())
return s
def _run_setup(setup_script='setup.py'):
# Note that we can reuse our build directory between calls
# Correctness comes first, then optimization later
__file__ = setup_script
__name__ = '__main__'
f = getattr(tokenize, 'open', open)(__file__)
code = f.read().replace('\\r\\n', '\\n')
f.close()
exec(compile(code, __file__, 'exec'), locals())
def _fix_config(config_settings):
config_settings = config_settings or {}
config_settings.setdefault('--global-option', [])
return config_settings
def _get_build_requires(config_settings, requirements):
    """Run ``setup.py egg_info`` under the patched Distribution and return
    *requirements* extended with whatever ``setup_requires`` declares.

    :param config_settings: PEP 517 config dict (may be None).
    :param requirements: base list of requirement strings; mutated in place
        and also returned.
    """
    config_settings = _fix_config(config_settings)
    # Fake the command line the setup script will see.
    sys.argv = sys.argv[:1] + ['egg_info'] + \
        config_settings["--global-option"]
    try:
        with Distribution.patch():
            _run_setup()
    except SetupRequirementsError as e:
        # Raised by Distribution.fetch_build_eggs with the list of
        # setup_requires specifiers instead of downloading them.
        requirements += e.specifiers
    return requirements
def _get_immediate_subdirectories(a_dir):
return [name for name in os.listdir(a_dir)
if os.path.isdir(os.path.join(a_dir, name))]
def get_requires_for_build_wheel(config_settings=None):
    """PEP 517 hook: requirements needed to build a wheel.

    'wheel' itself is always required, plus anything the setup script
    declares via ``setup_requires``.
    """
    return _get_build_requires(
        _fix_config(config_settings), requirements=['wheel'])
def get_requires_for_build_sdist(config_settings=None):
    """PEP 517 hook: requirements needed to build an sdist (only the
    setup script's own ``setup_requires``)."""
    return _get_build_requires(
        _fix_config(config_settings), requirements=[])
def prepare_metadata_for_build_wheel(metadata_directory, config_settings=None):
    """PEP 517 hook: run ``setup.py dist_info`` and return the basename of
    the resulting ``.dist-info`` directory inside *metadata_directory*."""
    sys.argv = sys.argv[:1] + ['dist_info', '--egg-base', _to_str(metadata_directory)]
    _run_setup()
    dist_info_directory = metadata_directory
    # dist_info may nest its output one level below the egg base; walk down
    # through single-child directories until a .dist-info entry shows up.
    while True:
        dist_infos = [f for f in os.listdir(dist_info_directory)
                      if f.endswith('.dist-info')]
        if len(dist_infos) == 0 and \
            len(_get_immediate_subdirectories(dist_info_directory)) == 1:
            dist_info_directory = os.path.join(
                dist_info_directory, os.listdir(dist_info_directory)[0])
            continue
        # Exactly one .dist-info must exist at this level; anything else
        # means dist_info produced unexpected output.
        assert len(dist_infos) == 1
        break
    # PEP 517 requires that the .dist-info directory be placed in the
    # metadata_directory. To comply, we MUST copy the directory to the root
    if dist_info_directory != metadata_directory:
        shutil.move(
            os.path.join(dist_info_directory, dist_infos[0]),
            metadata_directory)
        shutil.rmtree(dist_info_directory, ignore_errors=True)
    return dist_infos[0]
def build_wheel(wheel_directory, config_settings=None,
                metadata_directory=None):
    """PEP 517 hook: build a wheel via ``setup.py bdist_wheel`` and return
    its basename from *wheel_directory*.

    :param wheel_directory: directory the finished wheel must end up in.
    :param config_settings: PEP 517 config dict (may be None).
    :param metadata_directory: unused; accepted per PEP 517.
    """
    config_settings = _fix_config(config_settings)
    wheel_directory = os.path.abspath(wheel_directory)
    sys.argv = sys.argv[:1] + ['bdist_wheel'] + \
        config_settings["--global-option"]
    _run_setup()
    # bdist_wheel always writes into ./dist; mirror the result into the
    # requested directory when they differ.  The original compared the
    # absolute path against the bare string 'dist', which was always
    # unequal — so asking for the dist dir itself would rmtree the build
    # output and then fail copying from it.
    if wheel_directory != os.path.abspath('dist'):
        if os.path.isdir(wheel_directory):
            # Replace any stale contents from a previous build (the
            # original crashed here when the directory did not exist).
            shutil.rmtree(wheel_directory)
        shutil.copytree('dist', wheel_directory)
    wheels = [f for f in os.listdir(wheel_directory)
              if f.endswith('.whl')]
    assert len(wheels) == 1
    return wheels[0]
def build_sdist(sdist_directory, config_settings=None):
    """PEP 517 hook: build an sdist via ``setup.py sdist`` directly into
    *sdist_directory* and return its basename."""
    config_settings = _fix_config(config_settings)
    target = os.path.abspath(sdist_directory)
    # sdist supports --dist-dir, so no post-build copy step is needed here.
    sys.argv = (sys.argv[:1] + ['sdist'] +
                config_settings["--global-option"] +
                ["--dist-dir", target])
    _run_setup()
    sdists = [name for name in os.listdir(target) if name.endswith('.tar.gz')]
    assert len(sdists) == 1
    return sdists[0]
| mit |
LynxyssCZ/Flexget | flexget/plugins/clients/deluge.py | 5 | 42348 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from future.utils import native
import base64
import glob
import logging
import pkg_resources
import os
import re
import sys
import time
import warnings
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.template import RenderError
from flexget.utils.pathscrub import pathscrub
log = logging.getLogger('deluge')
def add_deluge_windows_install_dir_to_sys_path():
    """Make a Windows Deluge installation importable.

    Deluge's Windows installer bundles its own python packages instead of
    installing into site-packages, so locate the install directory and
    append it (and its eggs/zips) to sys.path.  No-op on other platforms,
    when the directory is missing, or when the bundled python minor
    version does not match ours.
    """
    # Deluge does not install to python system on Windows, add the install directory to sys.path if it is found
    if not (sys.platform.startswith('win') or os.environ.get('ProgramFiles')):
        return
    deluge_dir = os.path.join(os.environ['ProgramFiles'], 'Deluge')
    log.debug('Looking for deluge install in %s' % deluge_dir)
    if not os.path.isdir(deluge_dir):
        return
    deluge_egg = glob.glob(os.path.join(deluge_dir, 'deluge-*-py2.?.egg'))
    if not deluge_egg:
        return
    # The egg filename encodes the python version it was built for,
    # e.g. deluge-1.3.15-py2.7.egg.
    minor_version = int(re.search(r'py2\.(\d).egg', deluge_egg[0]).group(1))
    if minor_version != sys.version_info[1]:
        log.verbose('Cannot use deluge from install directory because its python version doesn\'t match.')
        return
    log.debug('Found deluge install in %s adding to sys.path' % deluge_dir)
    sys.path.append(deluge_dir)
    # Eggs and zips inside the install dir must be on sys.path individually.
    for item in os.listdir(deluge_dir):
        if item.endswith(('.egg', '.zip')):
            sys.path.append(os.path.join(deluge_dir, item))
add_deluge_windows_install_dir_to_sys_path()
def install_pausing_reactor():
    """Install a twisted reactor that FlexGet can pause and resume.

    Twisted's reactor normally runs forever; FlexGet instead runs it only
    while a deluge operation is in flight, pausing it (and returning a
    value) when the work is done.
    """
    class PausingReactor(SelectReactor):
        """A SelectReactor that can be paused and resumed."""
        def __init__(self):
            SelectReactor.__init__(self)
            self.paused = False
            self._return_value = None
            self._release_requested = False
            self._mainLoopGen = None
            # Older versions of twisted do not have the _started attribute, make it a synonym for running in that case
            if not hasattr(self, '_started'):
                PausingReactor._started = property(lambda self: self.running)
        def _mainLoopGenerator(self):
            """Generator that acts as mainLoop, but yields when requested."""
            while self._started:
                try:
                    while self._started:
                        if self._release_requested:
                            self._release_requested = False
                            self.paused = True
                            # Suspend here; run() returns this value and
                            # execution resumes on the next run() call.
                            yield self._return_value
                            self.paused = False
                        self.iterate()
                except KeyboardInterrupt:
                    # Keyboard interrupt pauses the reactor
                    self.pause()
                except GeneratorExit:
                    # GeneratorExit means stop the generator; Do it cleanly by stopping the whole reactor.
                    log.debug('Got GeneratorExit, stopping reactor.', exc_info=True)
                    self.paused = False
                    self.stop()
                except Exception:
                    twisted_log.msg("Unexpected error in main loop.")
                    twisted_log.err()
            else:
                twisted_log.msg('Main loop terminated.')
        def run(self, installSignalHandlers=False):
            """Starts or resumes the reactor."""
            if not self._started:
                self.startRunning(installSignalHandlers)
                self._mainLoopGen = self._mainLoopGenerator()
            try:
                return next(self._mainLoopGen)
            except StopIteration:
                pass
        def pause(self, return_value=None):
            """Causes reactor to pause after this iteration.
            If :return_value: is specified, it will be returned by the reactor.run call."""
            self._return_value = return_value
            self._release_requested = True
        def stop(self):
            """Stops the reactor."""
            SelectReactor.stop(self)
            # If this was called while the reactor was paused we have to resume in order for it to complete
            if self.paused:
                self.run()
            # These need to be re-registered so that the PausingReactor can be safely restarted after a stop
            self.addSystemEventTrigger('during', 'shutdown', self.crash)
            self.addSystemEventTrigger('during', 'shutdown', self.disconnectAll)
    # Configure twisted to use the PausingReactor.
    installReactor(PausingReactor())
@event('manager.shutdown')
def stop_reactor(manager):
    """Shut down the twisted reactor after all tasks have run."""
    # _stopped is set once the reactor has fully shut down; guard against
    # calling stop() twice, which twisted treats as an error.
    if not reactor._stopped:
        log.debug('Stopping twisted reactor.')
        reactor.stop()
# Some twisted import is throwing a warning see #2434
warnings.filterwarnings('ignore', message='Not importing directory .*')
try:
from twisted.python import log as twisted_log
from twisted.internet.main import installReactor
from twisted.internet.selectreactor import SelectReactor
except ImportError:
# If twisted is not found, errors will be shown later
pass
else:
install_pausing_reactor()
try:
# These have to wait until reactor has been installed to import
from twisted.internet import reactor
from deluge.ui.client import client
from deluge.ui.common import get_localhost_auth
except (ImportError, pkg_resources.DistributionNotFound):
# If deluge is not found, errors will be shown later
pass
class DelugePlugin(object):
    """Base class for deluge plugins, contains settings and methods for connecting to a deluge daemon."""
    def on_task_start(self, task, config):
        """Raise a DependencyError if our dependencies aren't available"""
        try:
            from deluge.ui.client import client  # noqa
        except ImportError as e:
            log.debug('Error importing deluge: %s' % e)
            raise plugin.DependencyError('deluge', 'deluge',
                                         'Deluge >=1.2 module and it\'s dependencies required. ImportError: %s' % e,
                                         log)
        try:
            from twisted.internet import reactor  # noqa
        except:
            raise plugin.DependencyError('deluge', 'twisted.internet', 'Twisted.internet package required', log)
    def on_task_abort(self, task, config):
        # Subclasses override this to clean up on abort.
        pass
    def prepare_connection_info(self, config):
        """Fill in connection defaults and migrate deprecated option names.

        Mutates *config* in place: host/port default to the local daemon,
        and the legacy `user`/`pass` keys are copied to
        `username`/`password` with a deprecation warning.
        """
        config.setdefault('host', 'localhost')
        config.setdefault('port', 58846)
        if 'user' in config or 'pass' in config:
            warnings.warn('deluge `user` and `pass` options have been renamed `username` and `password`',
                          DeprecationWarning)
            config.setdefault('username', config.get('user', ''))
            config.setdefault('password', config.get('pass', ''))
        config.setdefault('username', '')
        config.setdefault('password', '')
    def on_disconnect(self):
        """Pauses the reactor. Gets called when we disconnect from the daemon."""
        # pause the reactor, so flexget can continue
        reactor.callLater(0, reactor.pause)
    def on_connect_fail(self, result):
        """Pauses the reactor, returns PluginError. Gets called when connection to deluge daemon fails."""
        log.debug('Connect to deluge daemon failed, result: %s' % result)
        # The PluginError is handed to reactor.pause() and becomes the
        # return value of reactor.run() in connect(), which re-raises it.
        reactor.callLater(0, reactor.pause, plugin.PluginError('Could not connect to deluge daemon', log))
    def on_connect_success(self, result, task, config):
        """Gets called when successfully connected to the daemon. Should do the work then call client.disconnect"""
        raise NotImplementedError
    def connect(self, task, config):
        """Connects to the deluge daemon and runs on_connect_success """
        if config['host'] in ['localhost', '127.0.0.1'] and not config.get('username'):
            # If an username is not specified, we have to do a lookup for the localclient username/password
            auth = get_localhost_auth()
            if auth[0]:
                config['username'], config['password'] = auth
            else:
                raise plugin.PluginError('Unable to get local authentication info for Deluge. You may need to '
                                         'specify an username and password from your Deluge auth file.')
        client.set_disconnect_callback(self.on_disconnect)
        d = client.connect(
            host=config['host'],
            port=config['port'],
            username=config['username'],
            password=config['password'])
        d.addCallback(self.on_connect_success, task, config).addErrback(self.on_connect_fail)
        # Run the pausing reactor; it returns whatever value the callbacks
        # handed to reactor.pause() (possibly an exception to re-raise).
        result = reactor.run()
        if isinstance(result, Exception):
            raise result
        return result
class InputDeluge(DelugePlugin):
    """Create entries for torrents in the deluge session."""
    # Maps deluge torrent-status fields to flexget entry fields.  A tuple
    # value is (entry_field, converter) where the converter is applied to
    # the raw deluge value before it is stored on the entry.
    settings_map = {
        'name': 'title',
        'hash': 'torrent_info_hash',
        'num_peers': 'torrent_peers',
        'num_seeds': 'torrent_seeds',
        'progress': 'deluge_progress',
        'seeding_time': ('deluge_seed_time', lambda time: time / 3600),
        'private': 'deluge_private',
        'state': 'deluge_state',
        'eta': 'deluge_eta',
        'ratio': 'deluge_ratio',
        'move_on_completed_path': 'deluge_movedone',
        'save_path': 'deluge_path',
        'label': 'deluge_label',
        'total_size': ('content_size', lambda size: size / 1024 / 1024),
        'files': ('content_files', lambda file_dicts: [f['path'] for f in file_dicts])}
    # Optional status fields the user can request via the `keys` option;
    # these map straight through under the same name.
    extra_settings_map = {
        'active_time': ('active_time', lambda time: time / 3600),
        'compact': 'compact',
        'distributed_copies': 'distributed_copies',
        'download_payload_rate': 'download_payload_rate',
        'file_progress': 'file_progress',
        'is_auto_managed': 'is_auto_managed',
        'is_seed': 'is_seed',
        'max_connections': 'max_connections',
        'max_download_speed': 'max_download_speed',
        'max_upload_slots': 'max_upload_slots',
        'max_upload_speed': 'max_upload_speed',
        'message': 'message',
        'move_on_completed': 'move_on_completed',
        'next_announce': 'next_announce',
        'num_files': 'num_files',
        'num_pieces': 'num_pieces',
        'paused': 'paused',
        'peers': 'peers',
        'piece_length': 'piece_length',
        'prioritize_first_last': 'prioritize_first_last',
        'queue': 'queue',
        'remove_at_ratio': 'remove_at_ratio',
        'seed_rank': 'seed_rank',
        'stop_at_ratio': 'stop_at_ratio',
        'stop_ratio': 'stop_ratio',
        'total_done': 'total_done',
        'total_payload_download': 'total_payload_download',
        'total_payload_upload': 'total_payload_upload',
        'total_peers': 'total_peers',
        'total_seeds': 'total_seeds',
        'total_uploaded': 'total_uploaded',
        'total_wanted': 'total_wanted',
        'tracker': 'tracker',
        'tracker_host': 'tracker_host',
        'tracker_status': 'tracker_status',
        'trackers': 'trackers',
        'upload_payload_rate': 'upload_payload_rate'
    }
    def __init__(self):
        # Populated by on_connect_success, returned by on_task_input.
        self.entries = []
    schema = {
        'anyOf': [
            {'type': 'boolean'},
            {
                'type': 'object',
                'properties': {
                    'host': {'type': 'string'},
                    'port': {'type': 'integer'},
                    'username': {'type': 'string'},
                    'password': {'type': 'string'},
                    'config_path': {'type': 'string', 'format': 'path'},
                    'filter': {
                        'type': 'object',
                        'properties': {
                            'label': {'type': 'string'},
                            'state': {
                                'type': 'string',
                                'enum': ['active', 'downloading', 'seeding', 'queued', 'paused']
                            }
                        },
                        'additionalProperties': False
                    },
                    'keys': {
                        'type': 'array',
                        'items': {
                            'type': 'string',
                            'enum': list(extra_settings_map)
                        }
                    }
                },
                'additionalProperties': False
            }
        ]
    }
    def prepare_config(self, config):
        """Normalise the config: booleans become dicts and the filter
        values are canonicalised to what deluge expects."""
        if isinstance(config, bool):
            config = {}
        if 'filter' in config:
            filter = config['filter']
            # Deluge labels are stored lowercase; states are capitalised.
            if 'label' in filter:
                filter['label'] = filter['label'].lower()
            if 'state' in filter:
                filter['state'] = filter['state'].capitalize()
        self.prepare_connection_info(config)
        return config
    def on_task_input(self, task, config):
        """Generates and returns a list of entries from the deluge daemon."""
        # Reset the entries list
        self.entries = []
        # Call connect, entries get generated if everything is successful
        self.connect(task, self.prepare_config(config))
        return self.entries
    def on_connect_success(self, result, task, config):
        """Creates a list of FlexGet entries from items loaded in deluge and stores them to self.entries"""
        from deluge.ui.client import client
        def on_get_torrents_status(torrents):
            config_path = os.path.expanduser(config.get('config_path', ''))
            for hash, torrent_dict in torrents.items():
                # Make sure it has a url so no plugins crash
                entry = Entry(deluge_id=hash, url='')
                if config_path:
                    # Deluge keeps a copy of each .torrent in its state dir.
                    torrent_path = os.path.join(config_path, 'state', hash + '.torrent')
                    if os.path.isfile(torrent_path):
                        entry['location'] = torrent_path
                        if not torrent_path.startswith('/'):
                            torrent_path = '/' + torrent_path
                        entry['url'] = 'file://' + torrent_path
                    else:
                        log.warning('Did not find torrent file at %s' % torrent_path)
                for key, value in torrent_dict.items():
                    # Every requested key comes from one of the two maps.
                    if key in self.settings_map:
                        flexget_key = self.settings_map[key]
                    else:
                        flexget_key = self.extra_settings_map[key]
                    if isinstance(flexget_key, tuple):
                        flexget_key, format_func = flexget_key
                        value = format_func(value)
                    entry[flexget_key] = value
                self.entries.append(entry)
            client.disconnect()
        filter = config.get('filter', {})
        # deluge client lib chokes on future's newlist, make sure we have a native python list here
        client.core.get_torrents_status(filter, native(list(self.settings_map.keys()) + config.get('keys', []))).addCallback(
            on_get_torrents_status)
class OutputDeluge(DelugePlugin):
"""Add the torrents directly to deluge, supporting custom save paths."""
schema = {
'anyOf': [
{'type': 'boolean'},
{
'type': 'object',
'properties': {
'host': {'type': 'string'},
'port': {'type': 'integer'},
'username': {'type': 'string'},
'password': {'type': 'string'},
'path': {'type': 'string'},
'movedone': {'type': 'string'},
'label': {'type': 'string'},
'queuetotop': {'type': 'boolean'},
'automanaged': {'type': 'boolean'},
'maxupspeed': {'type': 'number'},
'maxdownspeed': {'type': 'number'},
'maxconnections': {'type': 'integer'},
'maxupslots': {'type': 'integer'},
'ratio': {'type': 'number'},
'removeatratio': {'type': 'boolean'},
'addpaused': {'type': 'boolean'},
'compact': {'type': 'boolean'},
'content_filename': {'type': 'string'},
'main_file_only': {'type': 'boolean'},
'main_file_ratio': {'type': 'number'},
'magnetization_timeout': {'type': 'integer'},
'keep_subs': {'type': 'boolean'},
'hide_sparse_files': {'type': 'boolean'},
'enabled': {'type': 'boolean'},
'container_directory': {'type': 'string'},
},
'additionalProperties': False
}
]
}
def prepare_config(self, config):
if isinstance(config, bool):
config = {'enabled': config}
self.prepare_connection_info(config)
config.setdefault('enabled', True)
config.setdefault('path', '')
config.setdefault('movedone', '')
config.setdefault('label', '')
config.setdefault('main_file_ratio', 0.90)
config.setdefault('magnetization_timeout', 0)
config.setdefault('keep_subs', True) # does nothing without 'content_filename' or 'main_file_only' enabled
config.setdefault('hide_sparse_files', False) # does nothing without 'main_file_only' enabled
return config
def __init__(self):
self.deluge_version = None
self.options = {'maxupspeed': 'max_upload_speed', 'maxdownspeed': 'max_download_speed',
'maxconnections': 'max_connections', 'maxupslots': 'max_upload_slots',
'automanaged': 'auto_managed', 'ratio': 'stop_ratio', 'removeatratio': 'remove_at_ratio',
'addpaused': 'add_paused', 'compact': 'compact_allocation'}
@plugin.priority(120)
def on_task_download(self, task, config):
"""
Call download plugin to generate the temp files we will load into deluge
then verify they are valid torrents
"""
import deluge.ui.common
config = self.prepare_config(config)
if not config['enabled']:
return
# If the download plugin is not enabled, we need to call it to get our temp .torrent files
if 'download' not in task.config:
download = plugin.get_plugin_by_name('download')
for entry in task.accepted:
if not entry.get('deluge_id'):
download.instance.get_temp_file(task, entry, handle_magnets=True)
# Check torrent files are valid
for entry in task.accepted:
if os.path.exists(entry.get('file', '')):
# Check if downloaded file is a valid torrent file
try:
deluge.ui.common.TorrentInfo(entry['file'])
except Exception:
entry.fail('Invalid torrent file')
log.error('Torrent file appears invalid for: %s', entry['title'])
@plugin.priority(135)
def on_task_output(self, task, config):
"""Add torrents to deluge at exit."""
config = self.prepare_config(config)
# don't add when learning
if task.options.learn:
return
if not config['enabled'] or not (task.accepted or task.options.test):
return
self.connect(task, config)
# Clean up temp file if download plugin is not configured for this task
if 'download' not in task.config:
for entry in task.accepted + task.failed:
if os.path.exists(entry.get('file', '')):
os.remove(entry['file'])
del (entry['file'])
def on_connect_success(self, result, task, config):
"""Gets called when successfully connected to a daemon."""
from deluge.ui.client import client
from twisted.internet import reactor, defer
if not result:
log.debug('on_connect_success returned a failed result. BUG?')
if task.options.test:
log.debug('Test connection to deluge daemon successful.')
client.disconnect()
return
def format_label(label):
"""Makes a string compliant with deluge label naming rules"""
return re.sub('[^\w-]+', '_', label.lower())
def set_torrent_options(torrent_id, entry, opts):
"""Gets called when a torrent was added to the daemon."""
dlist = []
if not torrent_id:
log.error('There was an error adding %s to deluge.' % entry['title'])
# TODO: Fail entry? How can this happen still now?
return
log.info('%s successfully added to deluge.' % entry['title'])
entry['deluge_id'] = torrent_id
def create_path(result, path):
"""Creates the specified path if deluge is older than 1.3"""
from deluge.common import VersionSplit
# Before 1.3, deluge would not create a non-existent move directory, so we need to.
if VersionSplit('1.3.0') > VersionSplit(self.deluge_version):
if client.is_localhost():
if not os.path.isdir(path):
log.debug('path %s doesn\'t exist, creating' % path)
os.makedirs(path)
else:
log.warning('If path does not exist on the machine running the daemon, move will fail.')
if opts.get('movedone'):
dlist.append(version_deferred.addCallback(create_path, opts['movedone']))
dlist.append(client.core.set_torrent_move_completed(torrent_id, True))
dlist.append(client.core.set_torrent_move_completed_path(torrent_id, opts['movedone']))
log.debug('%s move on complete set to %s' % (entry['title'], opts['movedone']))
if opts.get('label'):
def apply_label(result, torrent_id, label):
"""Gets called after labels and torrent were added to deluge."""
return client.label.set_torrent(torrent_id, label)
dlist.append(label_deferred.addCallback(apply_label, torrent_id, opts['label']))
if opts.get('queuetotop') is not None:
if opts['queuetotop']:
dlist.append(client.core.queue_top([torrent_id]))
log.debug('%s moved to top of queue' % entry['title'])
else:
dlist.append(client.core.queue_bottom([torrent_id]))
log.debug('%s moved to bottom of queue' % entry['title'])
def on_get_torrent_status(status):
"""Gets called with torrent status, including file info.
Sets the torrent options which require knowledge of the current status of the torrent."""
main_file_dlist = []
# Determine where the file should be
move_now_path = None
if opts.get('movedone'):
if status['progress'] == 100:
move_now_path = opts['movedone']
else:
# Deluge will unset the move completed option if we move the storage, forgo setting proper
# path, in favor of leaving proper final location.
log.debug('Not moving storage for %s, as this will prevent movedone.' % entry['title'])
elif opts.get('path'):
move_now_path = opts['path']
if move_now_path and os.path.normpath(move_now_path) != os.path.normpath(status['save_path']):
main_file_dlist.append(version_deferred.addCallback(create_path, move_now_path))
log.debug('Moving storage for %s to %s' % (entry['title'], move_now_path))
main_file_dlist.append(client.core.move_storage([torrent_id], move_now_path))
big_file_name = ''
if opts.get('content_filename') or opts.get('main_file_only'):
def file_exists(filename):
# Checks the download path as well as the move completed path for existence of the file
if os.path.exists(os.path.join(status['save_path'], filename)):
return True
elif status.get('move_on_completed') and status.get('move_on_completed_path'):
if os.path.exists(os.path.join(status['move_on_completed_path'], filename)):
return True
else:
return False
def unused_name(name):
# If on local computer, tries appending a (#) suffix until a unique filename is found
if client.is_localhost():
counter = 2
while file_exists(name):
name = ''.join([os.path.splitext(name)[0],
" (", str(counter), ')',
os.path.splitext(name)[1]])
counter += 1
else:
log.debug('Cannot ensure content_filename is unique '
'when adding to a remote deluge daemon.')
return name
def rename(file, new_name):
# Renames a file in torrent
main_file_dlist.append(
client.core.rename_files(torrent_id,
[(file['index'], new_name)]))
log.debug('File %s in %s renamed to %s' % (file['path'], entry['title'], new_name))
# find a file that makes up more than main_file_ratio (default: 90%) of the total size
main_file = None
for file in status['files']:
if file['size'] > (status['total_size'] * opts.get('main_file_ratio')):
main_file = file
break
if main_file is not None:
# proceed with renaming only if such a big file is found
# find the subtitle file
keep_subs = opts.get('keep_subs')
sub_file = None
if keep_subs:
sub_exts = [".srt", ".sub"]
for file in status['files']:
ext = os.path.splitext(file['path'])[1]
if ext in sub_exts:
sub_file = file
break
# check for single file torrents so we dont add unnecessary folders
if (os.path.dirname(main_file['path']) is not ("" or "/")):
# check for top folder in user config
if (opts.get('content_filename') and os.path.dirname(opts['content_filename']) is not ""):
top_files_dir = os.path.dirname(opts['content_filename']) + "/"
else:
top_files_dir = os.path.dirname(main_file['path']) + "/"
else:
top_files_dir = "/"
if opts.get('content_filename'):
# rename the main file
big_file_name = (top_files_dir +
os.path.basename(opts['content_filename']) +
os.path.splitext(main_file['path'])[1])
big_file_name = unused_name(big_file_name)
rename(main_file, big_file_name)
# rename subs along with the main file
if sub_file is not None and keep_subs:
sub_file_name = (os.path.splitext(big_file_name)[0] +
os.path.splitext(sub_file['path'])[1])
rename(sub_file, sub_file_name)
if opts.get('main_file_only'):
# download only the main file (and subs)
file_priorities = [1 if f == main_file or (f == sub_file and keep_subs) else 0
for f in status['files']]
main_file_dlist.append(
client.core.set_torrent_file_priorities(torrent_id, file_priorities))
if opts.get('hide_sparse_files'):
# hide the other sparse files that are not supposed to download but are created anyway
# http://dev.deluge-torrent.org/ticket/1827
# Made sparse files behave better with deluge http://flexget.com/ticket/2881
sparse_files = [f for f in status['files']
if f != main_file and (f != sub_file or (not keep_subs))]
rename_pairs = [(f['index'],
top_files_dir + ".sparse_files/" + os.path.basename(f['path']))
for f in sparse_files]
main_file_dlist.append(client.core.rename_files(torrent_id, rename_pairs))
else:
log.warning('No files in "%s" are > %d%% of content size, no files renamed.' % (
entry['title'],
opts.get('main_file_ratio') * 100))
container_directory = pathscrub(entry.render(entry.get('container_directory', config.get('container_directory', ''))))
if container_directory:
if big_file_name:
folder_structure = big_file_name.split(os.sep)
elif len(status['files']) > 0:
folder_structure = status['files'][0]['path'].split(os.sep)
else:
folder_structure = []
if len(folder_structure) > 1:
log.verbose('Renaming Folder %s to %s', folder_structure[0], container_directory)
main_file_dlist.append(client.core.rename_folder(torrent_id, folder_structure[0], container_directory))
else:
log.debug('container_directory specified however the torrent %s does not have a directory structure; skipping folder rename', entry['title'])
return defer.DeferredList(main_file_dlist)
status_keys = ['files', 'total_size', 'save_path', 'move_on_completed_path',
'move_on_completed', 'progress']
dlist.append(client.core.get_torrent_status(torrent_id, status_keys).addCallback(on_get_torrent_status))
return defer.DeferredList(dlist)
def on_fail(result, task, entry):
"""Gets called when daemon reports a failure adding the torrent."""
log.info('%s was not added to deluge! %s' % (entry['title'], result))
entry.fail('Could not be added to deluge')
# dlist is a list of deferreds that must complete before we exit
dlist = []
# loop through entries to get a list of labels to add
labels = set()
for entry in task.accepted:
if entry.get('label', config.get('label')):
try:
label = format_label(entry.render(entry.get('label', config.get('label'))))
log.debug('Rendered label: %s', label)
except RenderError as e:
log.error('Error rendering label `%s`: %s', label, e)
continue
labels.add(label)
label_deferred = defer.succeed(True)
if labels:
# Make sure the label plugin is available and enabled, then add appropriate labels
def on_get_enabled_plugins(plugins):
"""Gets called with the list of enabled deluge plugins."""
def on_label_enabled(result):
""" This runs when we verify the label plugin is enabled. """
def on_get_labels(d_labels):
"""Gets available labels from deluge, and adds any new labels we need."""
dlist = []
for label in labels:
if label not in d_labels:
log.debug('Adding the label `%s` to deluge', label)
dlist.append(client.label.add(label))
return defer.DeferredList(dlist)
return client.label.get_labels().addCallback(on_get_labels)
if 'Label' in plugins:
return on_label_enabled(True)
else:
# Label plugin isn't enabled, so we check if it's available and enable it.
def on_get_available_plugins(plugins):
"""Gets plugins available to deluge, enables Label plugin if available."""
if 'Label' in plugins:
log.debug('Enabling label plugin in deluge')
return client.core.enable_plugin('Label').addCallback(on_label_enabled)
else:
log.error('Label plugin is not installed in deluge')
return client.core.get_available_plugins().addCallback(on_get_available_plugins)
label_deferred = client.core.get_enabled_plugins().addCallback(on_get_enabled_plugins)
dlist.append(label_deferred)
def on_get_daemon_info(ver):
"""Gets called with the daemon version info, stores it in self."""
log.debug('deluge version %s' % ver)
self.deluge_version = ver
version_deferred = client.daemon.info().addCallback(on_get_daemon_info)
dlist.append(version_deferred)
def on_get_session_state(torrent_ids):
"""Gets called with a list of torrent_ids loaded in the deluge session.
Adds new torrents and modifies the settings for ones already in the session."""
dlist = []
# add the torrents
for entry in task.accepted:
@defer.inlineCallbacks
def _wait_for_metadata(torrent_id, timeout):
log.verbose('Waiting %d seconds for "%s" to magnetize' % (timeout, entry['title']))
for _ in range(timeout):
time.sleep(1)
try:
status = yield client.core.get_torrent_status(torrent_id, ['files'])
except Exception as err:
log.error('wait_for_metadata Error: %s' % err)
break
if status.get('files'):
log.info('"%s" magnetization successful' % (entry['title']))
break
else:
log.warning('"%s" did not magnetize before the timeout elapsed, '
'file list unavailable for processing.' % entry['title'])
defer.returnValue(torrent_id)
def add_entry(entry, opts):
"""Adds an entry to the deluge session"""
magnet, filedump = None, None
if entry.get('url', '').startswith('magnet:'):
magnet = entry['url']
else:
if not os.path.exists(entry['file']):
entry.fail('Downloaded temp file \'%s\' doesn\'t exist!' % entry['file'])
del (entry['file'])
return
with open(entry['file'], 'rb') as f:
filedump = base64.encodestring(f.read())
log.verbose('Adding %s to deluge.' % entry['title'])
if magnet:
d = client.core.add_torrent_magnet(magnet, opts)
if config.get('magnetization_timeout'):
d.addCallback(_wait_for_metadata, config['magnetization_timeout'])
return d
else:
return client.core.add_torrent_file(entry['title'], filedump, opts)
# Generate deluge options dict for torrent add
add_opts = {}
try:
path = entry.render(entry.get('path', config['path']))
if path:
add_opts['download_location'] = pathscrub(os.path.expanduser(path))
except RenderError as e:
log.error('Could not set path for %s: %s' % (entry['title'], e))
for fopt, dopt in self.options.items():
value = entry.get(fopt, config.get(fopt))
if value is not None:
add_opts[dopt] = value
if fopt == 'ratio':
add_opts['stop_at_ratio'] = True
# Make another set of options, that get set after the torrent has been added
modify_opts = {
'queuetotop': entry.get('queuetotop', config.get('queuetotop')),
'main_file_only': entry.get('main_file_only', config.get('main_file_only', False)),
'main_file_ratio': entry.get('main_file_ratio', config.get('main_file_ratio')),
'hide_sparse_files': entry.get('hide_sparse_files', config.get('hide_sparse_files', True)),
'keep_subs': entry.get('keep_subs', config.get('keep_subs', True))
}
try:
label = entry.render(entry.get('label', config['label']))
modify_opts['label'] = format_label(label)
except RenderError as e:
log.error('Error setting label for `%s`: %s', entry['title'], e)
try:
movedone = entry.render(entry.get('movedone', config['movedone']))
modify_opts['movedone'] = pathscrub(os.path.expanduser(movedone))
except RenderError as e:
log.error('Error setting movedone for %s: %s' % (entry['title'], e))
try:
content_filename = entry.get('content_filename', config.get('content_filename', ''))
modify_opts['content_filename'] = pathscrub(entry.render(content_filename))
except RenderError as e:
log.error('Error setting content_filename for %s: %s' % (entry['title'], e))
torrent_id = entry.get('deluge_id') or entry.get('torrent_info_hash')
torrent_id = torrent_id and torrent_id.lower()
if torrent_id in torrent_ids:
log.info('%s is already loaded in deluge, setting options' % entry['title'])
# Entry has a deluge id, verify the torrent is still in the deluge session and apply options
# Since this is already loaded in deluge, we may also need to change the path
modify_opts['path'] = add_opts.pop('download_location', None)
dlist.extend([set_torrent_options(torrent_id, entry, modify_opts),
client.core.set_torrent_options([torrent_id], add_opts)])
else:
dlist.append(add_entry(entry, add_opts).addCallbacks(
set_torrent_options, on_fail, callbackArgs=(entry, modify_opts), errbackArgs=(task, entry)))
return defer.DeferredList(dlist)
dlist.append(client.core.get_session_state().addCallback(on_get_session_state))
def on_complete(result):
"""Gets called when all of our tasks for deluge daemon are complete."""
client.disconnect()
tasks = defer.DeferredList(dlist).addBoth(on_complete)
def on_timeout(result):
"""Gets called if tasks have not completed in 30 seconds.
Should only happen when something goes wrong."""
log.error('Timed out while adding torrents to deluge.')
log.debug('dlist: %s' % result.resultList)
client.disconnect()
# Schedule a disconnect to happen if FlexGet hangs while connected to Deluge
# Leave the timeout long, to give time for possible lookups to occur
reactor.callLater(600, lambda: tasks.called or on_timeout(tasks))
def on_task_learn(self, task, config):
    """Clean up any leftover temp files once the task's entries are learned.

    When the download plugin is configured for this task it performs the
    cleanup itself, so we only act when it is absent from the task config.
    """
    if 'download' in task.config:
        return
    downloader = plugin.get_plugin_by_name('download')
    downloader.instance.cleanup_temp_files(task)
def on_task_abort(self, task, config):
    """Make sure normal cleanup tasks still happen on abort."""
    # Run the base class abort handling first, then reuse the learn-phase
    # handler so temp files are removed even when the task aborts.
    DelugePlugin.on_task_abort(self, task, config)
    self.on_task_learn(task, config)
@event('plugin.register')
def register_plugin():
    # Register both halves of the deluge integration with FlexGet using
    # plugin API version 2: "from_deluge" as an input plugin and "deluge"
    # as an output plugin.
    plugin.register(InputDeluge, 'from_deluge', api_ver=2)
    plugin.register(OutputDeluge, 'deluge', api_ver=2)
| mit |
mozts2005/OuterSpace | server/lib/ige/ospace/Const.py | 2 | 6566 | #
# Copyright 2001 - 2006 Ludek Smid [http://www.ospace.net/]
#
# This file is part of IGE - Outer Space.
#
# IGE - Outer Space is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# IGE - Outer Space is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with IGE - Outer Space; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# Game-wide constants for IGE - Outer Space: object/debug type tags and
# well-known object IDs.  Values extend the numbering begun in ige.Const.
from ige.Const import *
## additional object types
# Type tags for in-game objects (galaxies, systems, fleets, AI players, ...).
T_GALAXY = 100
T_SYSTEM = 101
T_PLANET = 102
T_FLEET = 103
T_ALLIANCE = 104
T_TECHNOLOGY = 105
T_ASTEROID = 106
T_NATURE = 107
T_AIPLAYER = 108
T_AIRENPLAYER = 109
T_AIMUTPLAYER = 110
T_AIPIRPLAYER = 111
T_AIEDENPLAYER = 112
T_PIRPLAYER = 113
T_WORMHOLE = 114
# Every type that behaves as a "player" for diplomacy/turn processing.
# NOTE(review): T_PLAYER is presumably supplied by the ige.Const wildcard
# import above -- confirm.
PLAYER_TYPES = (T_PLAYER, T_NATURE, T_AIPLAYER, T_AIRENPLAYER, T_AIMUTPLAYER,
    T_AIPIRPLAYER, T_AIEDENPLAYER, T_PIRPLAYER)
## debug types
# Tags used only by debugging/statistics tooling.
T_STRUCT = 300
T_TASK = 301
T_SCAN = 302
T_RESTASK = 303
T_SHIP = 304
T_STATS = 305
T_DIPLREL = 306
## additional indexes and OIDs
# Fixed object ID of the "Nature" pseudo-player.
OID_NATURE = 60
## messages
# Message identifiers delivered to players: the 1xxx range covers in-game
# events addressed to a single player; the 2xxx MSG_GNC_* range is the
# galaxy news channel (broadcasts).
MSG_WASTED_SCIPTS = 1001 # waste research points
MSG_COMPLETED_RESEARCH = 1002 # research completed
MSG_CANNOTBUILD_SHLOST = 1003 # cannot colonize planets - ship lost
MSG_CANNOTBUILD_NOSLOT = 1004 # cannot build - no free slot
# NOT NEEDED MSG_DESTROYED_BUILDING = 1005 # building has been destroyed
MSG_WASTED_PRODPTS = 1006 # waste production points
MSG_LOST_PLANET = 1007 # planet lost
MSG_COMPLETED_STRUCTURE = 1008 # construction of structure completed
MSG_COMPLETED_SHIP = 1009 # construction of ship completed
MSG_GAINED_PLANET = 1010 # planet gained
MSG_COMBAT_RESULTS = 1011 # damage taken, caused and lost ships/structures
MSG_EXTRACTED_STRATRES = 1012 # found strategy resource
MSG_COMBAT_LOST = 1013 # surrender to
MSG_DESTROYED_FLEET = 1014 # fleet destroyed
MSG_COMBAT_WON = 1015 # combat won
MSG_NEW_GOVCENTER = 1016 # new government centre
MSG_REVOLT_STARTED = 1017 # somebody started rebelling
MSG_REVOLT_ENDED = 1018 # somebody ended rebelling
MSG_INVALID_TASK = 1019 # task does not pass validity checks
MSG_NOSUPPORT_POP = 1020 # population has not enough support
MSG_COMPLETED_PROJECT = 1021 # project has been completed
MSG_ENABLED_TIME = 1022 # time has been started
MSG_MISSING_STRATRES = 1023 # missing strategic resource
MSG_DOWNGRADED_PLANET_ECO = 1024 # planet's ecology system was downgraded
MSG_UPGRADED_PLANET_ECO = 1025 # planet's ecology system was upgraded
MSG_UPGRADED_SHIP = 1026 # ship upgraded
MSG_DELETED_DESIGN = 1027 # design has been deleted
MSG_CANNOT_UPGRADE_SR = 1028 # cannot upgrade ship, strat. res. missing
MSG_DELETED_RESEARCH = 1029 # research deleted (disabled)
MSG_DAMAGE_BY_SG = 1030 # deceleration damage from Star Gate
MSG_GAINED_FAME = 1031 # pirate gained fame
MSG_LOST_FAME = 1032 # pirate lost some fame
MSG_GAINED_TECH = 1033 # pirate gained technology
MSG_EXTRACTED_ANTIMATTER_SYNTH = 1034 # Antimatter Synthesis success
MSG_ENTERED_WORMHOLE = 1035 # Wormhole Entered
MSG_NOT_ENTERED_WORMHOLE = 1036 # Failed to enter wormhole on enter wormhole command
MSG_FOUND_WORMHOLE = 1037 #todo
# NOTE(review): undocumented; name suggests a deploy-handler message
# (typo "DELOY" kept -- callers reference this spelling) -- confirm meaning.
MSG_DELOY_HANDLER = 1038
MSG_GNC_EMR_FORECAST = 2000 # Electromagnetic radiation (EMR) forecast
MSG_GNC_EMR_CURRENT_LVL = 2001 # Current level of EMR
MSG_GNC_GALAXY_RESTARTED = 2002 # Galaxy was restarted
MSG_GNC_VOTING_COMING = 2003 # new voting in 1:00 turna
MSG_GNC_VOTING_NOWINNER = 2004 # no winner in elections
MSG_GNC_VOTING_LEADER = 2005 # winner is leader
MSG_GNC_VOTING_IMPERATOR = 2006 # winner is imperator
MSG_GNC_GALAXY_GENERATOR = 2007 # galaxy generator information
MSG_GNC_GALAXY_AUTO_RESTARTED = 2008 # Galaxy was automatically restarted
## relations
# Diplomatic relation scores on a 0..1000 scale.  Each named level has a
# canonical value (REL_ENEMY, REL_NEUTRAL, ...) plus _LO/_HI band
# boundaries used for range tests; REL_UNITY and REL_UNDEF are special
# out-of-band markers.
REL_ENEMY_LO = 0
REL_ENEMY = 0
REL_ENEMY_HI = 125
REL_UNFRIENDLY_LO = 125
REL_UNFRIENDLY = 250
REL_UNFRIENDLY_HI = 375
REL_NEUTRAL_LO = 375
REL_NEUTRAL = 500
REL_NEUTRAL_HI = 625
REL_FRIENDLY_LO = 625
REL_FRIENDLY = 750
REL_FRIENDLY_HI = 875
REL_ALLY_LO = 875
REL_ALLY = 1000
REL_ALLY_HI = 1000
REL_UNITY = 1250
REL_UNDEF = 100000
REL_DEFAULT = REL_NEUTRAL
## pact values
# State of a pact between two players.
PACT_OFF = 0
PACT_INACTIVE = 1
PACT_ACTIVE = 2
## pacts
# Pact identifiers, grouped by the hundreds digit.
# fleet related
PACT_ALLOW_CIVILIAN_SHIPS = 100
PACT_ALLOW_MILITARY_SHIPS = 101
PACT_ALLOW_TANKING = 102
# civilian
PACT_MINOR_SCI_COOP = 200
PACT_MAJOR_SCI_COOP = 201
PACT_MINOR_CP_COOP = 202
PACT_MAJOR_CP_COOP = 203
# military
PACT_SHARE_SCANNER = 300
# governor
PACT_ACCEPT_AS_GOVERNOR = 400
PACT_OFFER_TO_BE_GOVERNOR = 401
## fleet actions
# Command codes stored in a fleet's action queue.
FLACTION_NONE = 0
FLACTION_MOVE = 1
FLACTION_DECLAREWAR = 1000
FLACTION_DEPLOY = 1002
FLACTION_REDIRECT = 1003
FLACTION_REFUEL = 1004
FLACTION_REPEATFROM = 1005
FLACTION_WAIT = 1006
FLACTION_ENTERWORMHOLE = 1007
## ship attributes
# Indexes into the per-ship data tuple/list.
SHIP_IDX_DESIGNID = 0
SHIP_IDX_HP = 1
SHIP_IDX_SHIELDHP = 2
SHIP_IDX_EXP = 3
## structure status
# Bit flags describing a planetary structure's condition; combined with
# bitwise OR and masked with STRUCT_STATUS_RESETFLGS.
STRUCT_STATUS_NOTOPER = 0x0000
STRUCT_STATUS_OK = 0x0001
STRUCT_STATUS_DETER = 0x0002
STRUCT_STATUS_NOBIO = 0x0004
# NOT NEEDED NOSTRUCT_STATUS_NOMIN = 0x0008
STRUCT_STATUS_NOEN = 0x0010
STRUCT_STATUS_NOPOP = 0x0020
STRUCT_STATUS_NEW = 0x0040
STRUCT_STATUS_REPAIRING = 0x0080
STRUCT_STATUS_ON = 0x1000
STRUCT_STATUS_OFF = 0x0000
STRUCT_STATUS_RESETFLGS = 0xf000
# Indexes into the per-structure data tuple/list.
STRUCT_IDX_TECHID = 0
STRUCT_IDX_HP = 1
STRUCT_IDX_STATUS = 2
STRUCT_IDX_OPSTATUS = 3
## contact types
# How a player knows about another object (scanner contact kind).
CONTACT_NONE = 0
CONTACT_DYNAMIC = 1
CONTACT_STATIC = 2
## starting positons types
STARTPOS_NEWPLAYER = 1
STARTPOS_AIPLAYER = 2
STARTPOS_PIRATE = 3
## strategic resources
# SR_* codes; grouped by the tech level (TL) at which they appear.
SR_NONE = 0
# TL 1 + TL 2
SR_TL1A = 1 # URANIUM
SR_TL1B = 2 # TITAN
# TL 3 + TL 4
SR_TL3A = 3
SR_TL3B = 4
SR_TL3C = 5
# TL 5
SR_TL5A = 6
SR_TL5B = 7
SR_TL5C = 8
# TL ALIENS
SR_TLAA = 1000 # Unnilseptium
# "goods"
SR_BIO1 = 100
## ship improvements
# Upgradeable ship statistic identifiers.
SI_SPEED = 1
SI_TANKS = 2
SI_ATT = 3
SI_DEF = 4
SI_HP = 5
SI_SHIELDS = 6
SI_SIGNATURE = 7
SI_SCANNERPWR = 8
## type of buoys
# Visibility class of map buoys.
BUOY_NONE = 0
BUOY_PRIVATE = 1
BUOY_TO_ALLY = 2
BUOY_FROM_ALLY = 3
BUOY_TO_SCANNERSHARE = 4
| gpl-2.0 |
u3mur4/syndicate | synx.py | 1 | 7902 | #! /usr/bin/env python
from subprocess import Popen,PIPE,STDOUT
import collections
import os
import sys
import time
import math
import os
from urllib2 import urlopen
# -- Installer configuration --------------------------------------------
# NOTE(review): both urlopen() calls run at import time, block without a
# timeout, and abort the whole script if the network is down -- consider
# deferring them into main().  TODO confirm this is acceptable here.
SERVER_IP = urlopen('https://api.ipify.org/').read()
# Peer list fetched from a pastebin paste; setup_masternode() appends it
# verbatim to the generated syndicate.conf, so this is trusted remote input.
NODE_LIST = urlopen('https://pastebin.com/raw/haX0XxCA').read()
# Blockchain bootstrap archive (mega.nz) and wallet release download URL.
BOOTSTRAP_URL = "https://mega.nz/#!5jYHDYJJ!Az4x8AQB6sqVgrS8R3HvR8k66CvJI8k-kzFP8Ua8zts"
WALLET_URL = "https://github.com/SyndicateLtd/SyndicateQt/releases/download/x2.1.0/Syndicate-2.1.0-linux64.zip"
# Daemon / CLI binary names and the daemon's data directory name.
MN_DAEMON = "syndicated"
MN_CLI = "syndicate-cli"
MN_LFOLDER = ".syndicate"
# ANSI reset sequence used by the print_* helpers below.
DEFAULT_COLOR = "\x1b[0m"
# Masternode private keys collected during setup; read by porologe().
PRIVATE_KEYS = []
def print_info(message):
    """Show *message* as a blue "[*]" status line, then pause one second."""
    blue = '\033[94m'
    print(''.join([blue, "[*] ", str(message), DEFAULT_COLOR]))
    time.sleep(1)
def print_warning(message):
    """Show *message* as a yellow "[*]" warning line, then pause one second."""
    yellow = '\033[93m'
    print(''.join([yellow, "[*] ", str(message), DEFAULT_COLOR]))
    time.sleep(1)
def print_error(message):
    """Show *message* as a red "[*]" error line, then pause half a second."""
    red = '\033[91m'
    print(''.join([red, "[*] ", str(message), DEFAULT_COLOR]))
    time.sleep(0.5)
def get_terminal_size():
    """Return (width, height) of the controlling terminal in characters.

    Issues a TIOCGWINSZ ioctl against fd 0 (stdin) and unpacks the four
    unsigned shorts of the winsize struct (rows, cols, xpixel, ypixel).
    Unix-only; raises OSError when stdin is not attached to a tty.
    """
    import fcntl, termios, struct
    # struct winsize: rows, cols, horizontal pixels, vertical pixels.
    h, w, hp, wp = struct.unpack('HHHH',
        fcntl.ioctl(0, termios.TIOCGWINSZ,
            struct.pack('HHHH', 0, 0, 0, 0)))
    return w, h
def remove_lines(lines):
    """Erase the previously printed *lines* from the terminal.

    For each entry in *lines*, moves the cursor up one row, returns to
    column 0 and clears the row via ANSI escape sequences.
    """
    cursor_up_one = '\x1b[1A'
    erase_line = '\x1b[2K'
    for _ in lines:
        sys.stdout.write(cursor_up_one + '\r' + erase_line)
        sys.stdout.flush()
def run_command(command):
    """Run *command* through the shell, showing a rolling 10-line tail of
    its combined stdout/stderr, and erase the tail when the command ends.

    NOTE(review): shell=True executes the string via /bin/sh -- all callers
    here pass fixed strings, but do not feed user input into this.
    """
    out = Popen(command, stderr=STDOUT, stdout=PIPE, shell=True)
    lines = []
    while True:
        line = out.stdout.readline()
        # readline() returns "" only at EOF (a blank output line is "\n").
        if (line == ""):
            break
        # remove previous lines
        remove_lines(lines)
        w, h = get_terminal_size()
        # 'string_escape' is a Python-2-only codec; this function will not
        # run unchanged on Python 3.  Truncate to the terminal width.
        lines.append(line.strip().encode('string_escape')[:w-3] + "\n")
        if(len(lines) >= 10):
            del lines[0]
        # print lines again
        for l in lines:
            sys.stdout.write('\r')
            sys.stdout.write(l)
            sys.stdout.flush()
    remove_lines(lines)
    # Reap the child so no zombie process is left behind.
    out.wait()
def print_welcome():
    """Clear the screen, print the Syndicate ASCII banner and the
    installer version line."""
    os.system('clear')
    banner = (
        " _____ _ _ _ ",
        " / ____| | (_) | | ",
        " | (___ _ _ _ __ __| |_ ___ __ _| |_ ___ ",
        " \___ \| | | | '_ \ / _` | |/ __/ _` | __/ _ \\",
        " ____) | |_| | | | | (_| | | (_| (_| | || __/",
        " |_____/ \__, |_| |_|\__,_|_|\___\__,_|\__\___|",
        " __/ | ",
        " |___/ ",
    )
    for row in banner:
        print(row)
    print("")
    print_info("Syndicate masternode installer v1.3")
def update_system():
    """Refresh the apt package index and upgrade installed packages."""
    print_info("Updating the system...")
    for cmd in ("apt-get update", "apt-get upgrade -y"):
        run_command(cmd)
def chech_root():
    """Exit with an error message unless running as root (uid 0).

    The misspelled name ("chech") is kept because callers use it.
    """
    print_info("Check root privileges")
    if os.getuid() != 0:
        print_error("This program requires root privileges. Run as root user.")
        sys.exit(-1)
def install_wallet():
    """Create a 3G swap file, persist it in /etc/fstab, and install the
    Syndicate wallet binaries into /usr/local/bin (skipped when the daemon
    binary is already present).

    Fixes over the original: /etc/fstab is handled inside a ``with`` block
    so the handle is always closed, and an empty fstab no longer raises
    IndexError when inspecting the last line.
    """
    print_info("Allocating swap...")
    run_command("fallocate -l 3G /swapfile")
    run_command("chmod 600 /swapfile")
    run_command("mkswap /swapfile")
    run_command("swapon /swapfile")
    # Persist the swap entry.  readlines() leaves the file position at EOF,
    # so the conditional write below appends to the file.
    line = '/swapfile none swap sw 0 0 \n'
    with open('/etc/fstab', 'r+b') as f:
        lines = f.readlines()
        # Guard against an empty fstab (original crashed on lines[-1]).
        if not lines or lines[-1] != line:
            f.write(line)
    print_info("Installing wallet build dependencies...")
    run_command("apt-get --assume-yes install git unzip")
    is_install = True
    if os.path.isfile('/usr/local/bin/syndicated'):
        print_warning('Wallet already installed on the system')
        is_install = False
    if is_install:
        print_info("Downloading wallet...")
        run_command("wget {} -O /tmp/wallet.zip".format(WALLET_URL))
        print_info("Installing wallet...")
        run_command("cd /tmp && unzip -u wallet.zip")
        run_command("find /tmp -name {} -exec cp {{}} /usr/local/bin \;".format(MN_DAEMON))
        run_command("find /tmp -name {} -exec cp {{}} /usr/local/bin \;".format(MN_CLI))
        run_command("chmod +x /usr/local/bin/{} /usr/local/bin/{}".format(MN_DAEMON, MN_CLI))
def autostart_masternode(user):
    """Ensure *user*'s crontab contains an @reboot entry starting syndicated.

    Reads the current crontab, and only when the job line is missing writes
    the augmented crontab back via ``crontab -``.

    NOTE(review): *user* is interpolated into shell commands (shell=True);
    safe for the fixed 'mn1' caller, but not for untrusted input.
    """
    job = "@reboot /usr/local/bin/syndicated\n"
    # `crontab -l` exits non-zero when no crontab exists; stderr is
    # discarded and the (empty) output is used as the starting point.
    p = Popen("crontab -l -u {} 2> /dev/null".format(user), stderr=STDOUT, stdout=PIPE, shell=True)
    p.wait()
    lines = p.stdout.readlines()
    if job not in lines:
        print_info("Cron job doesn't exist yet, adding it to crontab")
        lines.append(job)
        # Feed the whole crontab back through stdin of `crontab -`.
        p = Popen('echo "{}" | crontab -u {} -'.format(''.join(lines), user), stderr=STDOUT, stdout=PIPE, shell=True)
        p.wait()
def setup_masternode():
    """Create the `mn1` system user, interactively collect RPC credentials
    and the masternode private key, write syndicate.conf, download and
    unpack the blockchain bootstrap, and start the daemon in background.
    """
    print_info("Setting up first masternode")
    run_command("useradd --create-home -G sudo mn1")
    print_info("Open your desktop wallet config file (%appdata%/syndicate/syndicate.conf) and copy your rpc username and password! If it is not there create one! E.g.:\n\trpcuser=[SomeUserName]\n\trpcpassword=[DifficultAndLongPassword]")
    # Globals so later steps could reuse the credentials entered here.
    global rpc_username
    global rpc_password
    rpc_username = raw_input("rpcuser: ")
    rpc_password = raw_input("rpcpassword: ")
    print_info("Open your wallet console (Help => Debug window => Console) and create a new masternode private key: masternode genkey")
    masternode_priv_key = raw_input("masternodeprivkey: ")
    # Collected keys are summarized for the user by porologe().
    PRIVATE_KEYS.append(masternode_priv_key)
    # NODE_LIST (fetched from pastebin at import time) is appended verbatim
    # to the config -- remote input is trusted here; see module constants.
    config = """rpcuser={}
rpcpassword={}
rpcallowip=127.0.0.1
rpcport=25993
port=25992
server=1
listen=1
daemon=1
logtimestamps=1
mnconflock=1
masternode=1
masternodeaddr={}:25992
masternodeprivkey={}
{}""".format(rpc_username, rpc_password, SERVER_IP, masternode_priv_key, NODE_LIST)
    print_info("Saving config file...")
    run_command('su - mn1 -c "{}" '.format("mkdir -p /home/mn1/.syndicate/ && touch /home/mn1/.syndicate/syndicate.conf"))
    f = open('/home/mn1/.syndicate/syndicate.conf', 'w')
    f.write(config)
    f.close()
    print_info("Downloading bootstrap...")
    run_command("apt-get --assume-yes install megatools")
    filename = "blockchain.rar"
    run_command('su - mn1 -c "{}" '.format("cd && megadl '{}' --path {} 2>/dev/null".format(BOOTSTRAP_URL, filename)))
    print_info("Unzipping the file...")
    run_command("apt-get --assume-yes install unrar")
    run_command('su - mn1 -c "{}" '.format("cd && unrar x -o+ {} {}".format(filename, MN_LFOLDER)))
    #run_command('rm /home/mn1/.syndicate/peers.dat')
    # Install the @reboot cron job, then start the daemon for this session.
    autostart_masternode('mn1')
    os.system('su - mn1 -c "{}" '.format('syndicated -daemon'))
    print_warning("Masternode started syncing in the background...")
def porologe():
    """Print the closing summary: per-masternode wallet data the user must
    copy into the desktop wallet, plus a rot13-obfuscated donation note.

    NOTE(review): str.decode('rot13') / 'unicode-escape' on a str is
    Python-2-only; this function will not run unchanged on Python 3.
    """
    # Template for one masternode's desktop-wallet configuration entry.
    mn_base_data = """
Alias: Masternode{}
IP: {}
Private key: {}
Transaction ID: [5k desposit transaction id. 'masternode outputs']
Transaction index: [5k desposit transaction index. 'masternode outputs']
--------------------------------------------------
"""
    mn_data = ""
    # Ports are allocated sequentially from 25992, one per stored key.
    for idx, val in enumerate(PRIVATE_KEYS):
        mn_data += mn_base_data.format(idx+1, SERVER_IP + ":" + str(25992 + idx), val)
    # rot13-obfuscated donation message with wallet addresses.
    imp = """Vs lbh sbhaq gur thvqr naq guvf fpevcg hfrshy pbafvqre gb fhccbeg zr.\n\tFLAK: FAbTfY8Rw7QhLpk5i2Ll1UsKxZbMMzmRlz\n\tOGP: 33PeQClZcpjWSlZGprIZGYWLYE8mOFfaJz\n\tRGU: 0k9n794240o456O8qQ5593n7r8q7NR92s4pn4Q9Q2s\n"""
    print('')
    print_info(
        """Masternodes setup finished!
\tWait until masternode is fully synced. To check the progress login the
\tmasternode account (su mn1) and run
\tthe 'syndicate-cli getblockchaininfo' to get actual block number. Go to
\thttp://explorer.synx.online/ website to check the latest block number. After the
\tsyncronization is done add your masternode to your desktop wallet.
Data:""" + mn_data)
    print_warning(imp.decode('rot13').decode('unicode-escape'))
def main():
    """Run the installer end to end: banner, root check, system update,
    wallet installation, masternode setup and the closing summary."""
    steps = (
        print_welcome,
        chech_root,
        update_system,
        install_wallet,
        setup_masternode,
        porologe,
    )
    for step in steps:
        step()


if __name__ == "__main__":
    main()
| apache-2.0 |
relue2718/diff-scraper | diffscraper/libdiffscraper/tokenizer.py | 1 | 3927 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Author: Seunghyun Yoo (shyoo1st@cs.ucla.edu)
"""
from . import htmlparser, textparser
class Tokenizer(object):
    """Turns a raw document into string tokens using a format-specific
    parser ("html" or "text").

    The parsers report token metadata as (type, line_number, offset)
    triples; the string tokens are reconstructed here by slicing the
    original document between consecutive metadata positions.
    """
    def __init__(self):
        pass

    @staticmethod
    def feature(parser_type, raw_string):
        """Parse *raw_string* and return the parser's collected token
        metadata (``tokens_meta``)."""
        parser = Tokenizer.create_parser(parser_type)
        parser.clear(is_collecting_meta=True)
        parser.feed(raw_string)
        parser.close()
        return parser.tokens_meta

    @staticmethod
    def tokenize(parser_type, raw_string):
        """
        Since a parser just returns metadata of type, line number and offset,
        we have to construct a list of string tokens using the metadata.

        :param parser_type: a type of parser, supported parsers = {html, text}
        :param raw_string: a raw string of the original document.
        :return: a list of string tokens
        """
        parser = Tokenizer.create_parser(parser_type)
        parser.clear(is_collecting_meta=False)
        parser.feed(raw_string)
        parser.close()
        delimiter = "\n"
        lines = raw_string.split(delimiter)
        # If the <doc_start> token is somehow missing, insert it manually so
        # the first real token has a preceding boundary to slice from.
        _, first_line_number, first_offset = parser.tokens[0]
        if not (first_line_number == 1 and first_offset == 0):
            parser.tokens.insert(0, ("<doc_start>", 1, 0))
        output_tokens = []
        prev_token_meta_data = None
        for token_meta_data in parser.tokens:
            token_type, token_line_number, token_offset = token_meta_data
            # Each output token spans from the previous metadata position up
            # to the current one (idiom fixed from `not x is None`).
            if prev_token_meta_data is not None:
                prev_token_type, prev_token_line_number, prev_token_offset = prev_token_meta_data
                output_token = Tokenizer.get_string_token_from(
                    lines, prev_token_line_number, prev_token_offset,
                    token_line_number, token_offset, delimiter)
                output_tokens.append(output_token)
            prev_token_meta_data = token_meta_data
        return output_tokens

    @staticmethod
    def create_parser(parser_type):
        """Return a fresh parser instance for *parser_type* ('html' or
        'text'); raises on any other value."""
        if parser_type == "html":
            return htmlparser.RawHTMLParser()
        elif parser_type == "text":
            return textparser.RawTextParser()
        # Original had an unreachable `return None` after this raise.
        raise Exception("Unknown parser type '{}'".format(parser_type))

    @staticmethod
    def get_string_token_from(lines, prev_token_line_number, prev_token_offset,
                              token_line_number, token_offset, delimiter):
        """
        Get a string token from split lines using metadata of line number and
        offset.

        :param lines: the split lines
        :param prev_token_line_number: a start line number (1-based)
        :param prev_token_offset: a start line offset
        :param token_line_number: an end line number (1-based)
        :param token_offset: an end line offset (exclusive)
        :param delimiter: the delimiter used in splitting the original lines
        :return: the corresponding string token.
        """
        prev_token_line_number -= 1  # switch to zero-based indexing
        token_line_number -= 1
        if prev_token_line_number == token_line_number:
            return lines[prev_token_line_number][prev_token_offset:token_offset]
        # Multi-line token: partial first line, whole middle lines, partial
        # last line, re-joined with the original delimiter.
        temp_buf = ""
        for current_line_number in range(prev_token_line_number, token_line_number + 1):
            if current_line_number == prev_token_line_number:
                temp_buf += (lines[current_line_number][prev_token_offset:] + delimiter)
            elif current_line_number == token_line_number:
                temp_buf += lines[current_line_number][:token_offset]
            else:
                temp_buf += (lines[current_line_number] + delimiter)
        return temp_buf
| gpl-3.0 |
carlmw/oscar-wager | django/contrib/admin/util.py | 12 | 9600 | from django.db import models
from django.db.models.deletion import Collector
from django.db.models.related import RelatedObject
from django.forms.forms import pretty_name
from django.utils import formats
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.encoding import force_unicode, smart_unicode, smart_str
from django.utils.translation import ungettext
from django.core.urlresolvers import reverse, NoReverseMatch
from django.utils.datastructures import SortedDict
def quote(s):
    """
    Escape characters that would confuse admin URL patterns ('/', '_', ':'
    and other reserved characters) in a primary-key value.  Similar to
    urllib.quote, but uses '_XX' instead of '%XX' so browsers do not
    automatically unquote the value.  Non-string values pass through.
    """
    if not isinstance(s, basestring):
        return s
    quoted = []
    for ch in s:
        if ch in """:/_#?;@&=+$,"<>%\\""":
            quoted.append('_%02X' % ord(ch))
        else:
            quoted.append(ch)
    return ''.join(quoted)
def unquote(s):
    """
    Undo the effects of quote(): each '_XX' hex escape becomes the
    corresponding character; underscores not followed by two hex digits
    are kept verbatim.  Based heavily on urllib.unquote().
    """
    chunks = s.split('_')
    decoded = [chunks[0]]
    for chunk in chunks[1:]:
        # A valid escape needs at least two characters after the '_'.
        if chunk[1:2]:
            try:
                decoded.append(chr(int(chunk[:2], 16)) + chunk[2:])
            except ValueError:
                decoded.append('_' + chunk)
        else:
            decoded.append('_' + chunk)
    return "".join(decoded)
def flatten_fieldsets(fieldsets):
    """Returns a list of field names from an admin fieldsets structure.

    ``fieldsets`` is a sequence of ``(name, options)`` pairs where
    ``options['fields']`` contains field names or groups of names
    (fields rendered on one line).  Groups are flattened in order.
    """
    field_names = []
    for name, opts in fieldsets:
        for field in opts['fields']:
            # A group of fields on a single line may be given as a tuple
            # *or* a list; the original `type(field) == tuple` check
            # silently mishandled lists.
            if isinstance(field, (list, tuple)):
                field_names.extend(field)
            else:
                field_names.append(field)
    return field_names
def get_deleted_objects(objs, opts, user, admin_site, using):
    """
    Find all objects related to ``objs`` that should also be deleted. ``objs``
    must be a homogenous iterable of objects (e.g. a QuerySet).

    Returns a 2-tuple ``(to_delete, perms_needed)``: a nested list of
    strings suitable for display in the template with the
    ``unordered_list`` filter, and the set of verbose names of models the
    ``user`` lacks delete permission for.
    """
    collector = NestedObjects(using=using)
    collector.collect(objs)
    perms_needed = set()
    def format_callback(obj):
        # Format one object for display; as a side effect, record any
        # missing delete permissions in the enclosing perms_needed set.
        has_admin = obj.__class__ in admin_site._registry
        opts = obj._meta
        if has_admin:
            admin_url = reverse('%s:%s_%s_change'
                                % (admin_site.name,
                                   opts.app_label,
                                   opts.object_name.lower()),
                                None, (quote(obj._get_pk_val()),))
            p = '%s.%s' % (opts.app_label,
                           opts.get_delete_permission())
            if not user.has_perm(p):
                perms_needed.add(opts.verbose_name)
            # Display a link to the admin page.
            return mark_safe(u'%s: <a href="%s">%s</a>' %
                             (escape(capfirst(opts.verbose_name)),
                              admin_url,
                              escape(obj)))
        else:
            # Don't display link to edit, because it either has no
            # admin or is edited inline.
            return u'%s: %s' % (capfirst(opts.verbose_name),
                                force_unicode(obj))
    to_delete = collector.nested(format_callback)
    return to_delete, perms_needed
class NestedObjects(Collector):
    """Deletion Collector that additionally records which object caused
    each collected object to be collected, so the dependency graph can be
    rendered as a nested list (admin delete-confirmation page)."""
    def __init__(self, *args, **kwargs):
        super(NestedObjects, self).__init__(*args, **kwargs)
        self.edges = {} # {from_instance: [to_instances]}
    def add_edge(self, source, target):
        # Record that deleting `source` also deletes `target`; `source` is
        # None for root objects.
        self.edges.setdefault(source, []).append(target)
    def collect(self, objs, source_attr=None, **kwargs):
        # Record an edge for every object before delegating the actual
        # collection to the base class.
        for obj in objs:
            if source_attr:
                self.add_edge(getattr(obj, source_attr), obj)
            else:
                self.add_edge(None, obj)
        return super(NestedObjects, self).collect(objs, source_attr=source_attr, **kwargs)
    def related_objects(self, related, objs):
        # select_related the parent so that format callbacks walking the
        # edge do not trigger one extra query per object.
        qs = super(NestedObjects, self).related_objects(related, objs)
        return qs.select_related(related.field.name)
    def _nested(self, obj, seen, format_callback):
        # Depth-first walk of self.edges; `seen` guards against revisiting
        # (and thus against cycles).
        if obj in seen:
            return []
        seen.add(obj)
        children = []
        for child in self.edges.get(obj, ()):
            children.extend(self._nested(child, seen, format_callback))
        if format_callback:
            ret = [format_callback(obj)]
        else:
            ret = [obj]
        if children:
            ret.append(children)
        return ret
    def nested(self, format_callback=None):
        """
        Return the graph as a nested list.
        """
        seen = set()
        roots = []
        for root in self.edges.get(None, ()):
            roots.extend(self._nested(root, seen, format_callback))
        return roots
def model_format_dict(obj):
    """
    Return a `dict` with keys 'verbose_name' and 'verbose_name_plural',
    typically for use with string formatting.

    `obj` may be a `Model` instance, `Model` subclass, or `QuerySet`
    instance; anything else is assumed to already be an options object.
    """
    if isinstance(obj, models.query.QuerySet):
        opts = obj.model._meta
    elif isinstance(obj, (models.Model, models.base.ModelBase)):
        opts = obj._meta
    else:
        opts = obj
    return dict(
        verbose_name=force_unicode(opts.verbose_name),
        verbose_name_plural=force_unicode(opts.verbose_name_plural),
    )
def model_ngettext(obj, n=None):
    """
    Return the appropriate `verbose_name` or `verbose_name_plural` value for
    `obj` depending on the count `n`.

    `obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
    If `obj` is a `QuerySet` instance, `n` is optional and the length of the
    `QuerySet` is used.
    """
    if isinstance(obj, models.query.QuerySet):
        if n is None:
            n = obj.count()
        obj = obj.model
    names = model_format_dict(obj)
    return ungettext(names["verbose_name"], names["verbose_name_plural"], n or 0)
def lookup_field(name, obj, model_admin=None):
    """
    Resolve `name` against model instance `obj` for admin display.

    Returns a 3-tuple ``(f, attr, value)``:
    - ``f``: the model Field, or None when `name` is not a model field;
    - ``attr``: the callable/attribute used instead (None for real fields);
    - ``value``: the display value from the field, callable or attribute.
    """
    opts = obj._meta
    try:
        f = opts.get_field(name)
    except models.FieldDoesNotExist:
        # For non-field values, the value is either a method, property or
        # returned via a callable.
        if callable(name):
            attr = name
            value = attr(obj)
        elif (model_admin is not None and hasattr(model_admin, name) and
            not name == '__str__' and not name == '__unicode__'):
            # ModelAdmin methods take the instance as their argument;
            # __str__/__unicode__ are excluded so they resolve on obj below.
            attr = getattr(model_admin, name)
            value = attr(obj)
        else:
            # Fall back to an attribute (or zero-argument method) of obj.
            attr = getattr(obj, name)
            if callable(attr):
                value = attr()
            else:
                value = attr
        f = None
    else:
        attr = None
        value = getattr(obj, name)
    return f, attr, value
def label_for_field(name, model, model_admin=None, return_attr=False):
    """
    Return a human-readable label for an admin list_display item `name`.

    `name` may be a model field name, "__str__"/"__unicode__", a callable,
    or an attribute of `model_admin` or `model`.  When `return_attr` is
    True, returns ``(label, attr)`` where `attr` is the resolved
    callable/attribute (None for real model fields).

    Raises AttributeError when `name` cannot be resolved at all.
    """
    attr = None
    try:
        field = model._meta.get_field_by_name(name)[0]
        if isinstance(field, RelatedObject):
            label = field.opts.verbose_name
        else:
            label = field.verbose_name
    except models.FieldDoesNotExist:
        if name == "__unicode__":
            label = force_unicode(model._meta.verbose_name)
        elif name == "__str__":
            label = smart_str(model._meta.verbose_name)
        else:
            # Resolution order: callable, ModelAdmin attribute, model
            # attribute -- mirroring lookup_field above.
            if callable(name):
                attr = name
            elif model_admin is not None and hasattr(model_admin, name):
                attr = getattr(model_admin, name)
            elif hasattr(model, name):
                attr = getattr(model, name)
            else:
                message = "Unable to lookup '%s' on %s" % (name, model._meta.object_name)
                if model_admin:
                    message += " or %s" % (model_admin.__name__,)
                raise AttributeError(message)
            # Prefer an explicit short_description; "--" is shown for
            # anonymous lambdas since their __name__ is useless.
            if hasattr(attr, "short_description"):
                label = attr.short_description
            elif callable(attr):
                if attr.__name__ == "<lambda>":
                    label = "--"
                else:
                    label = pretty_name(attr.__name__)
            else:
                label = pretty_name(name)
    if return_attr:
        return (label, attr)
    else:
        return label
def display_for_field(value, field):
    """
    Format `value` for display in the admin change list based on the type
    of `field` (choices, booleans, dates, numbers, fallback to text).
    """
    # Imported here rather than at module level -- presumably to avoid a
    # circular import; confirm before moving to the top of the file.
    from django.contrib.admin.templatetags.admin_list import _boolean_icon
    from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
    if field.flatchoices:
        return dict(field.flatchoices).get(value, EMPTY_CHANGELIST_VALUE)
    # NullBooleanField needs special-case null-handling, so it comes
    # before the general null test.
    elif isinstance(field, models.BooleanField) or isinstance(field, models.NullBooleanField):
        return _boolean_icon(value)
    elif value is None:
        return EMPTY_CHANGELIST_VALUE
    elif isinstance(field, models.DateField) or isinstance(field, models.TimeField):
        return formats.localize(value)
    elif isinstance(field, models.DecimalField):
        return formats.number_format(value, field.decimal_places)
    elif isinstance(field, models.FloatField):
        return formats.number_format(value)
    else:
        return smart_unicode(value)
| bsd-3-clause |
mozilla/stoneridge | python/src/Lib/test/test_unicodedata.py | 110 | 12083 | """ Test script for the unicodedata module.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import sys
import unittest
import hashlib
import subprocess
import test.test_support
encoding = 'utf-8'
### Run tests
class UnicodeMethodsTest(unittest.TestCase):
    """Checks the unicode string methods (predicates and case mappings)
    over the whole BMP by hashing their results and comparing against a
    known SHA-1 checksum for the bundled Unicode database version."""
    # update this, if the database changes
    expectedchecksum = '4504dffd035baea02c5b9de82bebc3d65e0e0baf'
    def test_method_checksum(self):
        h = hashlib.sha1()
        # Walk every BMP code point and fold the method results into the hash.
        for i in range(0x10000):
            char = unichr(i)
            data = [
                # Predicates (single char)
                u"01"[char.isalnum()],
                u"01"[char.isalpha()],
                u"01"[char.isdecimal()],
                u"01"[char.isdigit()],
                u"01"[char.islower()],
                u"01"[char.isnumeric()],
                u"01"[char.isspace()],
                u"01"[char.istitle()],
                u"01"[char.isupper()],
                # Predicates (multiple chars)
                u"01"[(char + u'abc').isalnum()],
                u"01"[(char + u'abc').isalpha()],
                u"01"[(char + u'123').isdecimal()],
                u"01"[(char + u'123').isdigit()],
                u"01"[(char + u'abc').islower()],
                u"01"[(char + u'123').isnumeric()],
                u"01"[(char + u' \t').isspace()],
                u"01"[(char + u'abc').istitle()],
                u"01"[(char + u'ABC').isupper()],
                # Mappings (single char)
                char.lower(),
                char.upper(),
                char.title(),
                # Mappings (multiple chars)
                (char + u'abc').lower(),
                (char + u'ABC').upper(),
                (char + u'abc').title(),
                (char + u'ABC').title(),
                ]
            h.update(u''.join(data).encode(encoding))
        result = h.hexdigest()
        self.assertEqual(result, self.expectedchecksum)
class UnicodeDatabaseTest(unittest.TestCase):
    """Base class binding `self.db` to the unicodedata module for each
    test; importing inside setUp lets the other test cases still run if
    the module is unavailable."""
    def setUp(self):
        # In case unicodedata is not available, this will raise an ImportError,
        # but the other test cases will still be run
        import unicodedata
        self.db = unicodedata
    def tearDown(self):
        # Drop the reference so each test gets a fresh binding.
        del self.db
class UnicodeFunctionsTest(UnicodeDatabaseTest):
    """Checks the module-level unicodedata functions (digit, numeric,
    decimal, category, bidirectional, decomposition, mirrored, combining,
    normalize, east_asian_width) against known values plus a BMP-wide
    SHA-1 checksum of the property functions."""
    # update this, if the database changes
    expectedchecksum = '6ccf1b1a36460d2694f9b0b0f0324942fe70ede6'
    def test_function_checksum(self):
        data = []
        h = hashlib.sha1()
        for i in range(0x10000):
            char = unichr(i)
            data = [
                # Properties
                str(self.db.digit(char, -1)),
                str(self.db.numeric(char, -1)),
                str(self.db.decimal(char, -1)),
                self.db.category(char),
                self.db.bidirectional(char),
                self.db.decomposition(char),
                str(self.db.mirrored(char)),
                str(self.db.combining(char)),
            ]
            h.update(''.join(data))
        result = h.hexdigest()
        self.assertEqual(result, self.expectedchecksum)
    def test_digit(self):
        self.assertEqual(self.db.digit(u'A', None), None)
        self.assertEqual(self.db.digit(u'9'), 9)
        self.assertEqual(self.db.digit(u'\u215b', None), None)
        self.assertEqual(self.db.digit(u'\u2468'), 9)
        self.assertEqual(self.db.digit(u'\U00020000', None), None)
        self.assertRaises(TypeError, self.db.digit)
        self.assertRaises(TypeError, self.db.digit, u'xx')
        self.assertRaises(ValueError, self.db.digit, u'x')
    def test_numeric(self):
        self.assertEqual(self.db.numeric(u'A',None), None)
        self.assertEqual(self.db.numeric(u'9'), 9)
        self.assertEqual(self.db.numeric(u'\u215b'), 0.125)
        self.assertEqual(self.db.numeric(u'\u2468'), 9.0)
        self.assertEqual(self.db.numeric(u'\ua627'), 7.0)
        self.assertEqual(self.db.numeric(u'\U00020000', None), None)
        self.assertRaises(TypeError, self.db.numeric)
        self.assertRaises(TypeError, self.db.numeric, u'xx')
        self.assertRaises(ValueError, self.db.numeric, u'x')
    def test_decimal(self):
        self.assertEqual(self.db.decimal(u'A',None), None)
        self.assertEqual(self.db.decimal(u'9'), 9)
        self.assertEqual(self.db.decimal(u'\u215b', None), None)
        self.assertEqual(self.db.decimal(u'\u2468', None), None)
        self.assertEqual(self.db.decimal(u'\U00020000', None), None)
        self.assertRaises(TypeError, self.db.decimal)
        self.assertRaises(TypeError, self.db.decimal, u'xx')
        self.assertRaises(ValueError, self.db.decimal, u'x')
    def test_category(self):
        self.assertEqual(self.db.category(u'\uFFFE'), 'Cn')
        self.assertEqual(self.db.category(u'a'), 'Ll')
        self.assertEqual(self.db.category(u'A'), 'Lu')
        self.assertEqual(self.db.category(u'\U00020000'), 'Lo')
        self.assertRaises(TypeError, self.db.category)
        self.assertRaises(TypeError, self.db.category, u'xx')
    def test_bidirectional(self):
        self.assertEqual(self.db.bidirectional(u'\uFFFE'), '')
        self.assertEqual(self.db.bidirectional(u' '), 'WS')
        self.assertEqual(self.db.bidirectional(u'A'), 'L')
        self.assertEqual(self.db.bidirectional(u'\U00020000'), 'L')
        self.assertRaises(TypeError, self.db.bidirectional)
        self.assertRaises(TypeError, self.db.bidirectional, u'xx')
    def test_decomposition(self):
        self.assertEqual(self.db.decomposition(u'\uFFFE'),'')
        self.assertEqual(self.db.decomposition(u'\u00bc'), '<fraction> 0031 2044 0034')
        self.assertRaises(TypeError, self.db.decomposition)
        self.assertRaises(TypeError, self.db.decomposition, u'xx')
    def test_mirrored(self):
        self.assertEqual(self.db.mirrored(u'\uFFFE'), 0)
        self.assertEqual(self.db.mirrored(u'a'), 0)
        self.assertEqual(self.db.mirrored(u'\u2201'), 1)
        self.assertEqual(self.db.mirrored(u'\U00020000'), 0)
        self.assertRaises(TypeError, self.db.mirrored)
        self.assertRaises(TypeError, self.db.mirrored, u'xx')
    def test_combining(self):
        self.assertEqual(self.db.combining(u'\uFFFE'), 0)
        self.assertEqual(self.db.combining(u'a'), 0)
        self.assertEqual(self.db.combining(u'\u20e1'), 230)
        self.assertEqual(self.db.combining(u'\U00020000'), 0)
        self.assertRaises(TypeError, self.db.combining)
        self.assertRaises(TypeError, self.db.combining, u'xx')
    def test_normalize(self):
        self.assertRaises(TypeError, self.db.normalize)
        self.assertRaises(ValueError, self.db.normalize, 'unknown', u'xx')
        self.assertEqual(self.db.normalize('NFKC', u''), u'')
        # The rest can be found in test_normalization.py
        # which requires an external file.
    def test_pr29(self):
        # http://www.unicode.org/review/pr-29.html
        # See issues #1054943 and #10254.
        # NOTE(review): the leading "u" *inside* the last string literal
        # below looks like a typo (u'\u0938... probably intended) -- it
        # still normalizes to itself, but confirm upstream.
        composed = (u"\u0b47\u0300\u0b3e", u"\u1100\u0300\u1161",
                    u'Li\u030dt-s\u1e73\u0301',
                    u'\u092e\u093e\u0930\u094d\u0915 \u091c\u093c'
                    + u'\u0941\u0915\u0947\u0930\u092c\u0930\u094d\u0917',
                    u'\u0915\u093f\u0930\u094d\u0917\u093f\u091c\u093c'
                    + 'u\u0938\u094d\u0924\u093e\u0928')
        for text in composed:
            self.assertEqual(self.db.normalize('NFC', text), text)
    def test_issue10254(self):
        # Crash reported in #10254
        a = u'C\u0338' * 20 + u'C\u0327'
        b = u'C\u0338' * 20 + u'\xC7'
        self.assertEqual(self.db.normalize('NFC', a), b)
    def test_east_asian_width(self):
        eaw = self.db.east_asian_width
        self.assertRaises(TypeError, eaw, 'a')
        self.assertRaises(TypeError, eaw, u'')
        self.assertRaises(TypeError, eaw, u'ra')
        self.assertEqual(eaw(u'\x1e'), 'N')
        self.assertEqual(eaw(u'\x20'), 'Na')
        self.assertEqual(eaw(u'\uC894'), 'W')
        self.assertEqual(eaw(u'\uFF66'), 'H')
        self.assertEqual(eaw(u'\uFF1F'), 'F')
        self.assertEqual(eaw(u'\u2010'), 'A')
        self.assertEqual(eaw(u'\U00020000'), 'W')
class UnicodeMiscTest(UnicodeDatabaseTest):
    # Miscellaneous regression tests for the unicodedata module and for
    # Unicode-aware str methods.  Written for Python 2 (xrange, unichr,
    # sys.maxunicode).

    def test_failed_import_during_compiling(self):
        # Issue 4367
        # Decoding \N escapes requires the unicodedata module. If it can't be
        # imported, we shouldn't segfault.
        # This program should raise a SyntaxError in the eval.
        code = "import sys;" \
               "sys.modules['unicodedata'] = None;" \
               """eval("u'\N{SOFT HYPHEN}'")"""
        args = [sys.executable, "-c", code]
        # We use a subprocess because the unicodedata module may already have
        # been loaded in this process.
        popen = subprocess.Popen(args, stderr=subprocess.PIPE)
        popen.wait()
        # The interpreter must exit with an error, not crash.
        self.assertEqual(popen.returncode, 1)
        error = "SyntaxError: (unicode error) \N escapes not supported " \
                "(can't load unicodedata module)"
        self.assertIn(error, popen.stderr.read())

    def test_decimal_numeric_consistent(self):
        # Test that decimal and numeric are consistent,
        # i.e. if a character has a decimal value,
        # its numeric value should be the same.
        count = 0
        for i in xrange(0x10000):
            c = unichr(i)
            dec = self.db.decimal(c, -1)  # -1 is the "no value" default
            if dec != -1:
                self.assertEqual(dec, self.db.numeric(c))
                count += 1
        self.assertTrue(count >= 10) # should have tested at least the ASCII digits

    def test_digit_numeric_consistent(self):
        # Test that digit and numeric are consistent,
        # i.e. if a character has a digit value,
        # its numeric value should be the same.
        count = 0
        for i in xrange(0x10000):
            c = unichr(i)
            dec = self.db.digit(c, -1)  # -1 is the "no value" default
            if dec != -1:
                self.assertEqual(dec, self.db.numeric(c))
                count += 1
        self.assertTrue(count >= 10) # should have tested at least the ASCII digits

    def test_bug_1704793(self):
        # lookup() must resolve names of astral (non-BMP) characters.
        self.assertEqual(self.db.lookup("GOTHIC LETTER FAIHU"), u'\U00010346')

    def test_ucd_510(self):
        import unicodedata
        # In UCD 5.1.0, a mirrored property changed wrt. UCD 3.2.0
        self.assertTrue(unicodedata.mirrored(u"\u0f3a"))
        self.assertTrue(not unicodedata.ucd_3_2_0.mirrored(u"\u0f3a"))
        # Also, we now have two ways of representing
        # the upper-case mapping: as delta, or as absolute value
        self.assertTrue(u"a".upper()==u'A')
        self.assertTrue(u"\u1d79".upper()==u'\ua77d')
        self.assertTrue(u".".upper()==u".")

    def test_bug_5828(self):
        # Case mapping of U+1D79 must not produce U+0000.
        self.assertEqual(u"\u1d79".lower(), u"\u1d79")
        # Only U+0000 should have U+0000 as its upper/lower/titlecase variant
        self.assertEqual(
            [
                c for c in range(sys.maxunicode+1)
                if u"\x00" in unichr(c).lower()+unichr(c).upper()+unichr(c).title()
            ],
            [0]
        )

    def test_bug_4971(self):
        # LETTER DZ WITH CARON: DZ, Dz, dz
        # title() must map all three case forms to the titlecase form Dz.
        self.assertEqual(u"\u01c4".title(), u"\u01c5")
        self.assertEqual(u"\u01c5".title(), u"\u01c5")
        self.assertEqual(u"\u01c6".title(), u"\u01c5")

    def test_linebreak_7643(self):
        # splitlines() must split on exactly the Unicode line-break set.
        for i in range(0x10000):
            lines = (unichr(i) + u'A').splitlines()
            if i in (0x0a, 0x0b, 0x0c, 0x0d, 0x85,
                     0x1c, 0x1d, 0x1e, 0x2028, 0x2029):
                self.assertEqual(len(lines), 2,
                                 r"\u%.4x should be a linebreak" % i)
            else:
                self.assertEqual(len(lines), 1,
                                 r"\u%.4x should not be a linebreak" % i)
def test_main():
    # Run all three suites through the Python 2 regrtest helper
    # (test.test_support was renamed test.support in Python 3).
    test.test_support.run_unittest(
        UnicodeMiscTest,
        UnicodeMethodsTest,
        UnicodeFunctionsTest
    )

if __name__ == "__main__":
    test_main()
| mpl-2.0 |
hamish/SmeltCMS | packages/docutils/transforms/components.py | 196 | 1993 | # $Id: components.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Docutils component-related transforms.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import re
import time
from docutils import nodes, utils
from docutils import ApplicationError, DataError
from docutils.transforms import Transform, TransformError
class Filter(Transform):

    """
    Include or exclude elements which depend on a specific Docutils component.

    For use with `nodes.pending` elements.  A "pending" element's ``details``
    dictionary must contain the keys "component" and "format".
    ``details['component']`` names the component type the element depends on
    (e.g. "writer"); ``details['format']`` names a format or context of that
    component (e.g. "html").  If the matching Docutils component supports that
    format, the "pending" element is replaced by the node list stored in
    ``details['nodes']``; otherwise the "pending" element is removed.

    For example, the reStructuredText "meta" directive creates a "pending"
    element holding a "meta" node.  Only writers
    (``details['component'] == 'writer'``) supporting the "html" format
    (``details['format'] == 'html'``) include the "meta" node; all other
    writers drop it from the output.
    """

    default_priority = 780

    def apply(self):
        pending = self.startnode
        details = pending.details
        # Look up the active component ('reader' or 'writer') registered
        # with this document's transformer.
        component = self.document.transformer.components[details['component']]
        if component.supports(details['format']):
            # Supported: splice the stored nodes in place of the placeholder.
            pending.replace_self(details['nodes'])
        else:
            # Unsupported: drop the placeholder entirely.
            pending.parent.remove(pending)
| gpl-3.0 |
abhilashnta/edx-platform | pavelib/paver_tests/utils.py | 111 | 1498 | """Unit tests for the Paver server tasks."""
import os
from paver import tasks
from unittest import TestCase
class PaverTestCase(TestCase):
    """
    Base class for Paver test cases.
    """
    def setUp(self):
        """Install a mock Paver environment and disable prereq installation."""
        super(PaverTestCase, self).setUp()
        # Show full length diffs upon test failure
        self.maxDiff = None  # pylint: disable=invalid-name

        # Create a mock Paver environment
        tasks.environment = MockEnvironment()

        # Don't run pre-reqs
        os.environ['NO_PREREQ_INSTALL'] = 'true'

    def tearDown(self):
        """Restore the real Paver environment and clean up the env var."""
        super(PaverTestCase, self).tearDown()
        tasks.environment = tasks.Environment()
        del os.environ['NO_PREREQ_INSTALL']

    @property
    def task_messages(self):
        """Returns the messages output by the Paver task."""
        return tasks.environment.messages

    def reset_task_messages(self):
        """Clear the recorded message"""
        tasks.environment.messages = []
class MockEnvironment(tasks.Environment):
    """
    Paver environment stub that records messages instead of acting on them.
    """
    def __init__(self):
        super(MockEnvironment, self).__init__()
        self.dry_run = True   # never actually execute shell commands
        self.messages = []    # captured info() output

    def info(self, message, *args):
        """Record a formatted message, ignoring Paver's '--->' banner lines."""
        text = message % args if args else message
        if not text.startswith("--->"):
            self.messages.append(text)
| agpl-3.0 |
ashwinr64/android_kernel_cyanogen_msm8916 | scripts/gcc-wrapper.py | 580 | 3524 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
# Whitelist of "file.ext:line" warning locations that are tolerated; any
# other gcc warning aborts the build (see interpret_warning below).
allowed_warnings = set([
    "return_address.c:63",
    "kprobes.c:1493",
    "rcutree.c:1614",
    "af_unix.c:893",
    "nl80211.c:58",
    "jhash.h:137",
    "cmpxchg.h:162",
    "ping.c:87",
 ])

# Name of the output object file (parsed from gcc's -o argument), kept so a
# partially-built object can be deleted when a forbidden warning appears.
ofile = None

# Matches gcc diagnostics; group 2 captures "file.ext:line" for the whitelist.
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
def interpret_warning(line):
    """Decode the message from gcc. The messages we care about have a filename, and a warning"""
    # If the line is a warning whose file:line is not whitelisted, delete the
    # output object (so the build cannot appear complete) and exit non-zero.
    line = line.rstrip('\n')
    m = warning_re.match(line)
    if m and m.group(2) not in allowed_warnings:
        print "error, forbidden warning:", m.group(2)

        # If there is a warning, remove any object if it exists.
        if ofile:
            try:
                os.remove(ofile)
            except OSError:
                pass
        sys.exit(1)
def run_gcc():
    """Run the real gcc with this script's arguments, scanning stderr.

    Each stderr line is echoed and checked by interpret_warning(), which
    exits the process on a forbidden warning.  Returns gcc's exit status,
    or an errno value if gcc could not be launched.
    """
    args = sys.argv[1:]
    # Look for -o
    try:
        i = args.index('-o')
        global ofile
        ofile = args[i+1]
    except (ValueError, IndexError):
        pass
    compiler = sys.argv[0]
    try:
        proc = subprocess.Popen(args, stderr=subprocess.PIPE)
        for line in proc.stderr:
            # Echo gcc's output (trailing comma: no extra newline, Python 2),
            # then abort on any non-whitelisted warning.
            print line,
            interpret_warning(line)
        result = proc.wait()
    except OSError as e:
        # gcc itself could not be executed; report and return the errno.
        result = e.errno
        if result == errno.ENOENT:
            print args[0] + ':',e.strerror
            print 'Is your PATH set correctly?'
        else:
            print ' '.join(args), str(e)
    return result
if __name__ == '__main__':
    # Propagate gcc's exit status (or the forced failure from a warning).
    status = run_gcc()
    sys.exit(status)
| gpl-2.0 |
bartTC/django-frontendadmin | example_project/settings.py | 2 | 3743 | import sys, os
# Quick hack to put frontendadmin from the parent directory into pythonpath
# If this is not working correctly, uncomment these lines and put
# frontendadmin manually into pythonpath.
sys.path.append(
os.path.abspath(
os.path.normpath('%s/../' % os.path.dirname(__file__))
)
)
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = 'sqlite3' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = 'myproject.db' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'site_media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/site_media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '^6qlxq*maky)*u!fl+!_97m^zcywod0c)tujsm5+fngj1+y55x'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
'django.core.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
)
ROOT_URLCONF = 'example_project.urls'
TEMPLATE_DIRS = (
os.path.join(PROJECT_ROOT, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'django.contrib.comments',
'django.contrib.flatpages',
# Put frontendadmin before your applications, so that they can overwrite
# the frontendadmin templates.
'frontendadmin',
'example_project.weblog',
)
# Define custom forms to handle any model
FRONTEND_FORMS = {
# ``app_label.model_name`` : ``form_class``,
'weblog.entry': 'weblog.forms.EntryForm',
}
# Define which fields to exclude on a particular model
FRONTEND_EXCLUDES = {
# ``app_label.model_name`` : ``tuple``,
'weblog.entry': ('public',),
}
| bsd-3-clause |
panoptes/POCS | tests/test_sensors.py | 1 | 1867 | # Test sensors.py ability to read from two sensor boards.
import pytest
import responses
from panoptes.pocs.sensor import remote
from panoptes.pocs.sensor import power
from panoptes.utils import error
@pytest.fixture
def remote_response():
    """Canned JSON payload mimicking a state-change message from a remote sensor."""
    payload = {
        "data": {"source": "sleeping", "dest": "ready"},
        "type": "state",
        "_id": "1fb89552-f335-4f14-a599-5cd507012c2d",
    }
    return payload
@pytest.fixture
def remote_response_power():
    """Canned JSON payload carrying only the mains-power status."""
    return {"power": {"mains": True}}
@responses.activate
def test_remote_sensor(remote_response, remote_response_power):
    """RemoteMonitor round-trips the JSON served by two mocked endpoints."""
    # Two fake endpoints: one returning a generic state message, one a
    # power-status message.  `responses` intercepts the HTTP GETs.
    endpoint_url_no_power = 'http://192.168.1.241:8081'
    endpoint_url_with_power = 'http://192.168.1.241:8080'
    responses.add(responses.GET, endpoint_url_no_power, json=remote_response)
    responses.add(responses.GET, endpoint_url_with_power, json=remote_response_power)

    remote_monitor = remote.RemoteMonitor(
        sensor_name='test_remote',
        endpoint_url=endpoint_url_no_power,
        db_type='memory'
    )
    mocked_response = remote_monitor.capture(store_result=False)
    # capture() stamps the record with a 'date' key; drop it so the rest
    # can be compared against the canned payload.
    del mocked_response['date']
    assert remote_response == mocked_response

    # Check caplog for disconnect
    remote_monitor.disconnect()

    power_monitor = remote.RemoteMonitor(
        sensor_name='power',
        endpoint_url=endpoint_url_with_power,
        db_type='memory'
    )
    mocked_response = power_monitor.capture()
    del mocked_response['date']
    assert remote_response_power == mocked_response
def test_remote_sensor_no_endpoint():
    """Constructing a RemoteMonitor without an endpoint_url must raise."""
    with pytest.raises(error.PanError):
        remote.RemoteMonitor(sensor_name='should_fail')
def test_power_board_no_device():
    """Attempt to find an arduino device, which should fail."""
    # No arduino is attached in the test environment, so discovery raises.
    with pytest.raises(error.NotFound):
        power.PowerBoard()
| mit |
ivotron/teuthology | setup.py | 1 | 3300 | from setuptools import setup, find_packages
import re

# Pull __version__ (and any other __dunder__ string) out of the package
# source without importing it; use context managers so the file handles
# are closed promptly instead of being leaked.
with open("teuthology/__init__.py") as init_file:
    module_file = init_file.read()
metadata = dict(re.findall(r"__([a-z]+)__\s*=\s*['\"]([^'\"]*)['\"]", module_file))

with open('README.rst') as readme_file:
    long_description = readme_file.read()
setup(
name='teuthology',
version=metadata['version'],
packages=find_packages(),
package_data={
'teuthology.task': ['valgrind.supp', 'adjust-ulimits', 'edit_sudoers.sh', 'daemon-helper'],
'teuthology': ['ceph.conf.template'],
},
author='Inktank Storage, Inc.',
author_email='ceph-qa@ceph.com',
description='Ceph test framework',
license='MIT',
keywords='teuthology test ceph cluster',
url='https://github.com/ceph/teuthology',
long_description=long_description,
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing',
'Topic :: System :: Distributed Computing',
'Topic :: System :: Filesystems',
],
install_requires=['setuptools',
'gevent == 0.13.6', # 0.14 switches to libev, that means bootstrap needs to change too
'MySQL-python == 1.2.3',
'PyYAML',
'argparse >= 1.2.1',
'beanstalkc >= 0.2.0',
'boto >= 2.0b4',
'bunch >= 1.0.0',
'configobj',
'six >= 1.5.0', # Require a new-ish six - only for configobj
'httplib2',
'paramiko < 1.8',
'pexpect',
'requests >= 2.3.0',
'raven',
'web.py',
'docopt',
'psutil >= 2.1.0',
'configparser',
'pytest',
'ansible==1.9.2',
'pyopenssl>=0.13',
'ndg-httpsclient',
'pyasn1',
],
tests_require=['nose >=1.0.0', 'fudge >=1.0.3'],
# to find the code associated with entry point
# A.B:foo first cd into directory A, open file B
# and find sub foo
entry_points={
'console_scripts': [
'teuthology = scripts.run:main',
'teuthology-nuke = scripts.nuke:main',
'teuthology-suite = scripts.suite:main',
'teuthology-ls = scripts.ls:main',
'teuthology-worker = scripts.worker:main',
'teuthology-lock = scripts.lock:main',
'teuthology-schedule = scripts.schedule:main',
'teuthology-updatekeys = scripts.updatekeys:main',
'teuthology-update-inventory = scripts.update_inventory:main',
'teuthology-coverage = scripts.coverage:main',
'teuthology-results = scripts.results:main',
'teuthology-report = scripts.report:main',
'teuthology-kill = scripts.kill:main',
'teuthology-queue = scripts.queue:main',
],
},
)
| mit |
dbremner/bite-project | deps/gdata-python-client/samples/apps/marketplace_sample/atom/mock_http_core.py | 102 | 12008 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import StringIO
import pickle
import os.path
import tempfile
import atom.http_core
class Error(Exception):
  """Base class for exceptions raised by this module."""
  pass
class NoRecordingFound(Error):
  """Raised when no stored recording matches the outgoing request."""
  pass
class MockHttpClient(object):
  """HTTP client that replays recorded responses or records live ones.

  With real_client unset, request() is answered from stored
  (request, response) recordings.  With real_client set, requests go to
  the network and the scrubbed exchange is recorded for later replay.
  Recordings can be pickled to / loaded from a per-test temp file.
  """

  debug = None                 # passed through to the real client when live
  real_client = None           # live HttpClient, or None for replay mode
  last_request_was_live = False

  # The following members are used to construct the session cache temp file
  # name.
  # These are combined to form the file name
  # /tmp/cache_prefix.cache_case_name.cache_test_name
  cache_name_prefix = 'gdata_live_test'
  cache_case_name = ''
  cache_test_name = ''

  def __init__(self, recordings=None, real_client=None):
    # recordings: optional list of (request, MockHttpResponse) pairs.
    self._recordings = recordings or []
    if real_client is not None:
      self.real_client = real_client

  def add_response(self, http_request, status, reason, headers=None,
                   body=None):
    """Store a canned response to be replayed for a matching request."""
    response = MockHttpResponse(status, reason, headers, body)
    # TODO Scrub the request and the response.
    self._recordings.append((http_request._copy(), response))

  AddResponse = add_response  # CamelCase alias for the v1-style API

  def request(self, http_request):
    """Provide a recorded response, or record a response for replay.

    If the real_client is set, the request will be made using the
    real_client, and the response from the server will be recorded.
    If the real_client is None (the default), this method will examine
    the recordings and find the first which matches.
    """
    # Work on a scrubbed copy so stored requests never hold credentials.
    request = http_request._copy()
    _scrub_request(request)
    if self.real_client is None:
      self.last_request_was_live = False
      for recording in self._recordings:
        if _match_request(recording[0], request):
          return recording[1]
    else:
      # Pass along the debug settings to the real client.
      self.real_client.debug = self.debug
      # Make an actual request since we can use the real HTTP client.
      self.last_request_was_live = True
      response = self.real_client.request(http_request)
      scrubbed_response = _scrub_response(response)
      self.add_response(request, scrubbed_response.status,
                        scrubbed_response.reason,
                        dict(atom.http_core.get_headers(scrubbed_response)),
                        scrubbed_response.read())
      # Return the recording which we just added.
      return self._recordings[-1][1]
    raise NoRecordingFound('No recoding was found for request: %s %s' % (
        request.method, str(request.uri)))

  Request = request  # CamelCase alias for the v1-style API

  def _save_recordings(self, filename):
    """Pickle the recordings into the temp directory under `filename`."""
    recording_file = open(os.path.join(tempfile.gettempdir(), filename),
                          'wb')
    pickle.dump(self._recordings, recording_file)
    recording_file.close()

  def _load_recordings(self, filename):
    """Replace the recordings with those pickled under `filename`."""
    recording_file = open(os.path.join(tempfile.gettempdir(), filename),
                          'rb')
    self._recordings = pickle.load(recording_file)
    recording_file.close()

  def _delete_recordings(self, filename):
    """Remove the recordings file if it exists (no error if absent)."""
    full_path = os.path.join(tempfile.gettempdir(), filename)
    if os.path.exists(full_path):
      os.remove(full_path)

  def _load_or_use_client(self, filename, http_client):
    # Replay from the cache file when present; otherwise go live with
    # http_client and record.
    if os.path.exists(os.path.join(tempfile.gettempdir(), filename)):
      self._load_recordings(filename)
    else:
      self.real_client = http_client

  def use_cached_session(self, name=None, real_http_client=None):
    """Attempts to load recordings from a previous live request.

    If a temp file with the recordings exists, then it is used to fulfill
    requests. If the file does not exist, then a real client is used to
    actually make the desired HTTP requests. Requests and responses are
    recorded and will be written to the desired temprary cache file when
    close_session is called.

    Args:
      name: str (optional) The file name of session file to be used. The file
            is loaded from the temporary directory of this machine. If no name
            is passed in, a default name will be constructed using the
            cache_name_prefix, cache_case_name, and cache_test_name of this
            object.
      real_http_client: atom.http_core.HttpClient the real client to be used
                        if the cached recordings are not found. If the default
                        value is used, this will be an
                        atom.http_core.HttpClient.
    """
    if real_http_client is None:
      real_http_client = atom.http_core.HttpClient()
    if name is None:
      self._recordings_cache_name = self.get_cache_file_name()
    else:
      self._recordings_cache_name = name
    self._load_or_use_client(self._recordings_cache_name, real_http_client)

  def close_session(self):
    """Saves recordings in the temporary file named in use_cached_session."""
    # Only a live session produced new recordings worth saving.
    if self.real_client is not None:
      self._save_recordings(self._recordings_cache_name)

  def delete_session(self, name=None):
    """Removes recordings from a previous live request."""
    if name is None:
      self._delete_recordings(self._recordings_cache_name)
    else:
      self._delete_recordings(name)

  def get_cache_file_name(self):
    """Build the session cache file name from the prefix/case/test parts."""
    return '%s.%s.%s' % (self.cache_name_prefix, self.cache_case_name,
                         self.cache_test_name)

  def _dump(self):
    """Provides debug information in a string."""
    output = 'MockHttpClient\n real_client: %s\n cache file name: %s\n' % (
        self.real_client, self.get_cache_file_name())
    output += ' recordings:\n'
    i = 0
    for recording in self._recordings:
      output += ' recording %i is for: %s %s\n' % (
          i, recording[0].method, str(recording[0].uri))
      i += 1
    return output
def _match_request(http_request, stored_request):
"""Determines whether a request is similar enough to a stored request
to cause the stored response to be returned."""
# Check to see if the host names match.
if (http_request.uri.host is not None
and http_request.uri.host != stored_request.uri.host):
return False
# Check the request path in the URL (/feeds/private/full/x)
elif http_request.uri.path != stored_request.uri.path:
return False
# Check the method used in the request (GET, POST, etc.)
elif http_request.method != stored_request.method:
return False
# If there is a gsession ID in either request, make sure that it is matched
# exactly.
elif ('gsessionid' in http_request.uri.query
or 'gsessionid' in stored_request.uri.query):
if 'gsessionid' not in stored_request.uri.query:
return False
elif 'gsessionid' not in http_request.uri.query:
return False
elif (http_request.uri.query['gsessionid']
!= stored_request.uri.query['gsessionid']):
return False
# Ignores differences in the query params (?start-index=5&max-results=20),
# the body of the request, the port number, HTTP headers, just to name a
# few.
return True
def _scrub_request(http_request):
""" Removes email address and password from a client login request.
Since the mock server saves the request and response in plantext, sensitive
information like the password should be removed before saving the
recordings. At the moment only requests sent to a ClientLogin url are
scrubbed.
"""
if (http_request and http_request.uri and http_request.uri.path and
http_request.uri.path.endswith('ClientLogin')):
# Remove the email and password from a ClientLogin request.
http_request._body_parts = []
http_request.add_form_inputs(
{'form_data': 'client login request has been scrubbed'})
else:
# We can remove the body of the post from the recorded request, since
# the request body is not used when finding a matching recording.
http_request._body_parts = []
return http_request
def _scrub_response(http_response):
  # Responses are currently stored verbatim.  This hook mirrors
  # _scrub_request so sensitive response data could be stripped before the
  # recording is pickled, if ever needed.
  return http_response
class EchoHttpClient(object):
  """Sends the request data back in the response.

  Used to check the formatting of the request as it was sent. Always responds
  with a 200 OK, and some information from the HTTP request is returned in
  special Echo-X headers in the response. The following headers are added
  in the response:
  'Echo-Host': The host name and port number to which the HTTP connection is
               made. If no port was passed in, the header will contain
               host:None.
  'Echo-Uri': The path portion of the URL being requested. /example?x=1&y=2
  'Echo-Scheme': The beginning of the URL, usually 'http' or 'https'
  'Echo-Method': The HTTP method being used, 'GET', 'POST', 'PUT', etc.
  """

  def request(self, http_request):
    # Delegate to the helper with the request unpacked into its parts.
    return self._http_request(http_request.uri, http_request.method,
                              http_request.headers, http_request._body_parts)

  def _http_request(self, uri, method, headers=None, body_parts=None):
    """Build the echo response for one request (Python 2: StringIO body)."""
    body = StringIO.StringIO()
    response = atom.http_core.HttpResponse(status=200, reason='OK', body=body)
    if headers is None:
      response._headers = {}
    else:
      # Copy headers from the request to the response but convert values to
      # strings. Server response headers always come in as strings, so an int
      # should be converted to a corresponding string when echoing.
      for header, value in headers.iteritems():
        response._headers[header] = str(value)
    # Record request metadata in the Echo-* headers described above.
    response._headers['Echo-Host'] = '%s:%s' % (uri.host, str(uri.port))
    response._headers['Echo-Uri'] = uri._get_relative_path()
    response._headers['Echo-Scheme'] = uri.scheme
    response._headers['Echo-Method'] = method
    # Replay the request body (strings or file-like parts) into the response.
    for part in body_parts:
      if isinstance(part, str):
        body.write(part)
      elif hasattr(part, 'read'):
        body.write(part.read())
    body.seek(0)  # rewind so the caller can read from the start
    return response
class SettableHttpClient(object):
  """HTTP client stub that answers every request with a preset response."""

  def __init__(self, status, reason, body, headers):
    """Configures the response for the server.

    See set_response for details on the arguments to the constructor.
    """
    self.set_response(status, reason, body, headers)
    self.last_request = None

  def set_response(self, status, reason, body, headers):
    """Determines the response which will be sent for each request.

    Args:
      status: An int for the HTTP status code, example: 200, 404, etc.
      reason: String for the HTTP reason, example: OK, NOT FOUND, etc.
      body: The body of the HTTP response as a string or a file-like
            object (something with a read method).
      headers: dict of strings containing the HTTP headers in the response.
    """
    canned = atom.http_core.HttpResponse(status=status, reason=reason,
                                         body=body)
    canned._headers = headers.copy()
    self.response = canned

  def request(self, http_request):
    # Remember the request for later inspection, then reply with the canned
    # response.
    self.last_request = http_request
    return self.response
class MockHttpResponse(atom.http_core.HttpResponse):
  """Recorded HTTP response whose body can be read any number of times.

  Unlike a real response, the body is stored as a string rather than a
  file-like object, so read() is repeatable.
  """

  def __init__(self, status=None, reason=None, headers=None, body=None):
    self._headers = headers or {}
    # Only overwrite the base-class attributes that were supplied.
    if status is not None:
      self.status = status
    if reason is not None:
      self.reason = reason
    if body is not None:
      # Drain file-like bodies into a string so reads can be repeated.
      self._body = body.read() if hasattr(body, 'read') else body

  def read(self):
    return self._body
| apache-2.0 |
XuezheMax/NeuroNLP | experiments/bi-maxru-cnn.py | 1 | 25603 | __author__ = 'max'
"""
Implementation of the bi-directional MAXRU-CNN model for sequence labeling.
"""
import time
import sys
import argparse
import numpy as np
import lasagne
import theano
import theano.tensor as T
from lasagne.layers import Gate
from lasagne import nonlinearities
from lasagne.updates import nesterov_momentum, adam
from neuronlp.io import data_utils, get_logger
from neuronlp import utils
from neuronlp.layers.recurrent import MAXRULayer
from neuronlp.layers.conv import ConvTimeStep1DLayer
from neuronlp.layers.pool import PoolTimeStep1DLayer
WORD_DIM = 100
CHARACTER_DIM = 30
def build_std_dropout(incoming1, incoming2, num_units, num_time_units, max_length, num_labels,
                      mask, grad_clipping, num_filters, p):
    """Build the bi-directional MAXRU-CNN network with standard dropout.

    Args:
        incoming1: character-embedding layer, shape [batch, n-step, c_dim, char_length].
        incoming2: word-embedding layer, concatenated with the CNN output.
        num_units: hidden units per MAXRU direction.
        num_time_units: units of the MAXRU time gates.
        max_length: maximum sequence length for the MAXRU layers.
        num_labels: number of output labels.
        mask: mask layer marking valid (non-padded) time steps.
        grad_clipping: gradient-clipping threshold for the recurrent layers.
        p: dropout probability for the character input and the BiRNN output.

    Returns:
        A DenseLayer emitting softmax label probabilities with the time axis
        flattened: shape [batch * max_length, num_labels].
    """
    # first get some necessary dimensions or parameters
    conv_window = 3
    # shape = [batch, n-step, c_dim, char_length]
    incoming1 = lasagne.layers.DropoutLayer(incoming1, p=p)

    # construct convolution layer over characters
    # shape = [batch, n-step, c_filters, output_length]
    cnn_layer = ConvTimeStep1DLayer(incoming1, num_filters=num_filters, filter_size=conv_window, pad='full',
                                    nonlinearity=lasagne.nonlinearities.tanh, name='cnn')
    # infer the pool size for pooling (pool size should go through all time step of cnn)
    _, _, _, pool_size = cnn_layer.output_shape
    # construct max pool layer
    # shape = [batch, n-step, c_filters, 1]
    pool_layer = PoolTimeStep1DLayer(cnn_layer, pool_size=pool_size)
    # reshape: [batch, n-step, c_filters, 1] --> [batch, n-step, c_filters]
    output_cnn_layer = lasagne.layers.reshape(pool_layer, ([0], [1], [2]))

    # finally, concatenate the two incoming layers together.
    # shape = [batch, n-step, c_filter&w_dim]
    incoming = lasagne.layers.concat([output_cnn_layer, incoming2], axis=2)
    # dropout for incoming
    # NOTE(review): input dropout is hard-coded to 0.2 while the output
    # dropout uses the p parameter -- confirm this asymmetry is intended.
    incoming = lasagne.layers.DropoutLayer(incoming, p=0.2)

    # Forward-direction MAXRU: gates for time update, reset, update, and the
    # candidate hidden state.
    time_updategate_forward = Gate(W_in=lasagne.init.GlorotUniform(), W_hid=lasagne.init.GlorotUniform(), W_cell=None)

    time_update_forward = Gate(W_in=lasagne.init.GlorotUniform(), W_hid=lasagne.init.GlorotUniform(),
                               W_cell=None, nonlinearity=nonlinearities.tanh)

    resetgate_forward = Gate(W_in=lasagne.init.GlorotUniform(), W_hid=lasagne.init.GlorotUniform(),
                             W_cell=lasagne.init.GlorotUniform())

    updategate_forward = Gate(W_in=lasagne.init.GlorotUniform(), W_hid=lasagne.init.GlorotUniform(),
                              W_cell=lasagne.init.GlorotUniform())

    hiden_update_forward = Gate(W_in=lasagne.init.GlorotUniform(), W_hid=lasagne.init.GlorotUniform(), W_cell=None,
                                nonlinearity=nonlinearities.tanh)

    maxru_forward = MAXRULayer(incoming, num_units, num_time_units, max_length, mask_input=mask,
                               P_time=lasagne.init.GlorotUniform(), nonlinearity=nonlinearities.tanh,
                               resetgate=resetgate_forward, updategate=updategate_forward,
                               hidden_update=hiden_update_forward,
                               time_updategate=time_updategate_forward, time_update=time_update_forward,
                               grad_clipping=grad_clipping, p=0., name='forward')

    # Backward-direction MAXRU with its own (untied) gate parameters.
    time_updategate_backward = Gate(W_in=lasagne.init.GlorotUniform(), W_hid=lasagne.init.GlorotUniform(), W_cell=None)

    time_update_backward = Gate(W_in=lasagne.init.GlorotUniform(), W_hid=lasagne.init.GlorotUniform(),
                                W_cell=None, nonlinearity=nonlinearities.tanh)

    resetgate_backward = Gate(W_in=lasagne.init.GlorotUniform(), W_hid=lasagne.init.GlorotUniform(),
                              W_cell=lasagne.init.GlorotUniform())

    updategate_backward = Gate(W_in=lasagne.init.GlorotUniform(), W_hid=lasagne.init.GlorotUniform(),
                               W_cell=lasagne.init.GlorotUniform())

    hiden_update_backward = Gate(W_in=lasagne.init.GlorotUniform(), W_hid=lasagne.init.GlorotUniform(), W_cell=None,
                                 nonlinearity=nonlinearities.tanh)

    maxru_backward = MAXRULayer(incoming, num_units, num_time_units, max_length, mask_input=mask,
                                P_time=lasagne.init.GlorotUniform(), nonlinearity=nonlinearities.tanh,
                                resetgate=resetgate_backward, updategate=updategate_backward,
                                hidden_update=hiden_update_backward,
                                time_updategate=time_updategate_backward, time_update=time_update_backward,
                                grad_clipping=grad_clipping, p=0., backwards=True, name='backward')

    # concatenate the outputs of forward and backward RNNs to combine them.
    bi_maxru_cnn = lasagne.layers.concat([maxru_forward, maxru_backward], axis=2, name="bi-maxru-cnn")

    bi_maxru_cnn = lasagne.layers.DropoutLayer(bi_maxru_cnn, p=p)

    # reshape bi-rnn-cnn to [batch * max_length, num_units]
    bi_maxru_cnn = lasagne.layers.reshape(bi_maxru_cnn, (-1, [2]))

    # construct output layer (dense layer with softmax)
    layer_output = lasagne.layers.DenseLayer(bi_maxru_cnn, num_units=num_labels, nonlinearity=nonlinearities.softmax,
                                             name='softmax')

    return layer_output
def _build_maxru_gates():
    """Create the five freshly-initialized gates one MAXRULayer needs.

    Returns a tuple ``(time_updategate, time_update, resetgate, updategate,
    hidden_update)``.  Every call constructs brand-new Gate objects, so the
    forward and backward recurrent layers never share parameters.
    """
    glorot = lasagne.init.GlorotUniform
    time_updategate = Gate(W_in=glorot(), W_hid=glorot(), W_cell=None)
    time_update = Gate(W_in=glorot(), W_hid=glorot(), W_cell=None,
                       nonlinearity=nonlinearities.tanh)
    resetgate = Gate(W_in=glorot(), W_hid=glorot(), W_cell=glorot())
    updategate = Gate(W_in=glorot(), W_hid=glorot(), W_cell=glorot())
    hidden_update = Gate(W_in=glorot(), W_hid=glorot(), W_cell=None,
                         nonlinearity=nonlinearities.tanh)
    return time_updategate, time_update, resetgate, updategate, hidden_update


def build_recur_dropout(incoming1, incoming2, num_units, num_time_units, max_length, num_labels, mask, grad_clipping,
                        num_filters, p):
    """Build the bi-directional MAXRU-CNN network with recurrent dropout.

    :param incoming1: char-embedding layer, shape [batch, n-step, c_dim, char_length]
    :param incoming2: word-embedding layer, shape [batch, n-step, w_dim]
    :param num_units: number of hidden units in each MAXRU layer
    :param num_time_units: number of hidden time units in each MAXRU layer
    :param max_length: maximum sentence length
    :param num_labels: number of output labels (softmax width)
    :param mask: mask input layer
    :param grad_clipping: gradient clipping threshold
    :param p: dropout probability, also applied inside the recurrent units
        (this is the only difference from the std-dropout variant, which
        passes p=0. to the MAXRU layers)
    :return: dense softmax layer with shape [batch * max_length, num_labels]
    """
    conv_window = 3
    # character-level CNN:
    # [batch, n-step, c_dim, char_length] -> [batch, n-step, c_filters, output_length]
    cnn_layer = ConvTimeStep1DLayer(incoming1, num_filters=num_filters, filter_size=conv_window, pad='full',
                                    nonlinearity=lasagne.nonlinearities.tanh, name='cnn')
    # pool over the whole time dimension of the CNN output
    _, _, _, pool_size = cnn_layer.output_shape
    # shape = [batch, n-step, c_filters, 1]
    pool_layer = PoolTimeStep1DLayer(cnn_layer, pool_size=pool_size)
    # reshape: [batch, n-step, c_filters, 1] --> [batch, n-step, c_filters]
    output_cnn_layer = lasagne.layers.reshape(pool_layer, ([0], [1], [2]))
    # concatenate char features with word embeddings:
    # shape = [batch, n-step, c_filter&w_dim]
    incoming = lasagne.layers.concat([output_cnn_layer, incoming2], axis=2)
    # input dropout, shared across the time axis
    incoming = lasagne.layers.DropoutLayer(incoming, p=0.2, shared_axes=(1,))
    (time_updategate_forward, time_update_forward, resetgate_forward,
     updategate_forward, hidden_update_forward) = _build_maxru_gates()
    maxru_forward = MAXRULayer(incoming, num_units, num_time_units, max_length, mask_input=mask,
                               P_time=lasagne.init.GlorotUniform(), nonlinearity=nonlinearities.tanh,
                               resetgate=resetgate_forward, updategate=updategate_forward,
                               hidden_update=hidden_update_forward,
                               time_updategate=time_updategate_forward, time_update=time_update_forward,
                               grad_clipping=grad_clipping, p=p, name='forward')
    (time_updategate_backward, time_update_backward, resetgate_backward,
     updategate_backward, hidden_update_backward) = _build_maxru_gates()
    maxru_backward = MAXRULayer(incoming, num_units, num_time_units, max_length, mask_input=mask,
                                P_time=lasagne.init.GlorotUniform(), nonlinearity=nonlinearities.tanh,
                                resetgate=resetgate_backward, updategate=updategate_backward,
                                hidden_update=hidden_update_backward,
                                time_updategate=time_updategate_backward, time_update=time_update_backward,
                                grad_clipping=grad_clipping, p=p, backwards=True, name='backward')
    # concatenate the outputs of forward and backward MAXRUs to combine them.
    bi_maxru_cnn = lasagne.layers.concat([maxru_forward, maxru_backward], axis=2, name="bi-maxru-cnn")
    # shape = [batch, n-step, num_units]; dropout shared across time steps
    bi_maxru_cnn = lasagne.layers.DropoutLayer(bi_maxru_cnn, p=p, shared_axes=(1,))
    # reshape bi-rnn-cnn to [batch * max_length, num_units]
    bi_maxru_cnn = lasagne.layers.reshape(bi_maxru_cnn, (-1, [2]))
    # construct output layer (dense layer with softmax)
    layer_output = lasagne.layers.DenseLayer(bi_maxru_cnn, num_units=num_labels, nonlinearity=nonlinearities.softmax,
                                             name='softmax')
    return layer_output
def build_network(word_var, char_var, mask_var, word_alphabet, char_alphabet, dropout, num_units, num_time_units,
                  max_length, num_labels, grad_clipping=5.0, num_filters=30, p=0.5):
    """Assemble the full bi-directional MAXRU-CNN tagging network.

    Builds the word and character embedding input layers (word embeddings are
    initialized from GloVe 100d vectors loaded from a hard-coded path), then
    delegates to the dropout-variant-specific builder.

    :param dropout: 'std' or 'recurrent'; anything else raises ValueError
    :raises ValueError: for an unknown dropout pattern
    """
    def generate_random_embedding(scale, shape):
        # uniform init in [-scale, scale], cast to the Theano float type
        return np.random.uniform(-scale, scale, shape).astype(theano.config.floatX)
    def construct_word_embedding_table():
        # rows are GloVe vectors where available, random otherwise;
        # assumes WORD_DIM is a module-level constant -- TODO confirm
        scale = np.sqrt(3.0 / WORD_DIM)
        table = np.empty([word_alphabet.size(), WORD_DIM], dtype=theano.config.floatX)
        table[data_utils.UNK_ID, :] = generate_random_embedding(scale, [1, WORD_DIM])
        for word, index in word_alphabet.iteritems():
            # GloVe is lower-cased when the embedding dict is caseless
            ww = word.lower() if caseless else word
            embedding = embedd_dict[ww] if ww in embedd_dict else generate_random_embedding(scale, [1, WORD_DIM])
            table[index, :] = embedding
        return table
    def construct_char_embedding_table():
        # characters have no pretrained vectors: all-random init
        scale = np.sqrt(3.0 / CHARACTER_DIM)
        table = generate_random_embedding(scale, [char_alphabet.size(), CHARACTER_DIM])
        return table
    def construct_word_input_layer():
        # shape = [batch, n-step]
        layer_word_input = lasagne.layers.InputLayer(shape=(None, None), input_var=word_var, name='word_input')
        # shape = [batch, n-step, w_dim]
        layer_word_embedding = lasagne.layers.EmbeddingLayer(layer_word_input, input_size=word_alphabet.size(),
                                                             output_size=WORD_DIM, W=word_table, name='word_embedd')
        return layer_word_embedding
    def construct_char_input_layer():
        # shape = [batch, n-step, char_length]
        layer_char_input = lasagne.layers.InputLayer(shape=(None, None, data_utils.MAX_CHAR_LENGTH), input_var=char_var,
                                                     name='char_input')
        # shape = [batch, n-step, char_length, c_dim]
        layer_char_embedding = lasagne.layers.EmbeddingLayer(layer_char_input, input_size=char_alphabet.size(),
                                                             output_size=CHARACTER_DIM, W=char_table,
                                                             name='char_embedd')
        # shape = [batch, n-step, c_dim, char_length] (channels before length for the CNN)
        layer_char_embedding = lasagne.layers.DimshuffleLayer(layer_char_embedding, pattern=(0, 1, 3, 2))
        return layer_char_embedding
    embedd_dict, embedd_dim, caseless = utils.load_word_embedding_dict('glove', "data/glove/glove.6B/glove.6B.100d.gz")
    assert embedd_dim == WORD_DIM
    word_table = construct_word_embedding_table()
    char_table = construct_char_embedding_table()
    layer_char_input = construct_char_input_layer()
    layer_word_input = construct_word_input_layer()
    layer_mask = lasagne.layers.InputLayer(shape=(None, None), input_var=mask_var, name='mask')
    if dropout == 'std':
        return build_std_dropout(layer_char_input, layer_word_input, num_units, num_time_units, max_length, num_labels,
                                 layer_mask, grad_clipping, num_filters, p)
    elif dropout == 'recurrent':
        return build_recur_dropout(layer_char_input, layer_word_input, num_units, num_time_units, max_length,
                                   num_labels, layer_mask, grad_clipping, num_filters, p)
    else:
        # fixed typos in the original message ('unkown ... patten')
        raise ValueError('unknown dropout pattern: %s' % dropout)
def main():
parser = argparse.ArgumentParser(description='Tuning with bi-directional MAXRU-CNN')
parser.add_argument('--num_epochs', type=int, default=1000, help='Number of training epochs')
parser.add_argument('--batch_size', type=int, default=16, help='Number of sentences in each batch')
parser.add_argument('--num_units', type=int, default=100, help='Number of hidden units in TARU')
parser.add_argument('--num_time_units', type=int, default=100, help='Number of hidden time units in TARU')
parser.add_argument('--num_filters', type=int, default=20, help='Number of filters in CNN')
parser.add_argument('--learning_rate', type=float, default=0.1, help='Learning rate')
parser.add_argument('--decay_rate', type=float, default=0.1, help='Decay rate of learning rate')
parser.add_argument('--grad_clipping', type=float, default=0, help='Gradient clipping')
parser.add_argument('--gamma', type=float, default=1e-6, help='weight for regularization')
parser.add_argument('--regular', choices=['none', 'l2'], help='regularization for training', required=True)
parser.add_argument('--dropout', choices=['std', 'recurrent'], help='dropout patten')
parser.add_argument('--schedule', nargs='+', type=int, help='schedule for learning rate decay')
parser.add_argument('--output_prediction', action='store_true', help='Output predictions to temp files')
parser.add_argument('--train') # "data/POS-penn/wsj/split1/wsj1.train.original"
parser.add_argument('--dev') # "data/POS-penn/wsj/split1/wsj1.dev.original"
parser.add_argument('--test') # "data/POS-penn/wsj/split1/wsj1.test.original"
args = parser.parse_args()
logger = get_logger("Sequence Labeling (MAXRU-CNN)")
train_path = args.train
dev_path = args.dev
test_path = args.test
num_epochs = args.num_epochs
batch_size = args.batch_size
num_units = args.num_units
num_time_units = args.num_time_units
num_filters = args.num_filters
regular = args.regular
grad_clipping = args.grad_clipping
gamma = args.gamma
learning_rate = args.learning_rate
momentum = 0.9
decay_rate = args.decay_rate
schedule = args.schedule
output_predict = args.output_prediction
dropout = args.dropout
p = 0.33
max_length = 150
logger.info("Creating Alphabets")
word_alphabet, char_alphabet, pos_alphabet, type_alphabet = data_utils.create_alphabets("data/alphabets/",
[train_path, dev_path,
test_path],
40000)
logger.info("Word Alphabet Size: %d" % word_alphabet.size())
logger.info("Character Alphabet Size: %d" % char_alphabet.size())
logger.info("POS Alphabet Size: %d" % pos_alphabet.size())
num_labels = pos_alphabet.size() - 1
logger.info("Reading Data")
data_train = data_utils.read_data(train_path, word_alphabet, char_alphabet, pos_alphabet, type_alphabet)
data_dev = data_utils.read_data(dev_path, word_alphabet, char_alphabet, pos_alphabet, type_alphabet)
data_test = data_utils.read_data(test_path, word_alphabet, char_alphabet, pos_alphabet, type_alphabet)
num_data = sum([len(bucket) for bucket in data_train])
logger.info("constructing network...")
# create variables
target_var = T.imatrix(name='targets')
mask_var = T.matrix(name='masks', dtype=theano.config.floatX)
mask_nr_var = T.matrix(name='masks_nr', dtype=theano.config.floatX)
word_var = T.imatrix(name='inputs')
char_var = T.itensor3(name='char-inputs')
network = build_network(word_var, char_var, mask_var, word_alphabet, char_alphabet, dropout, num_units,
num_time_units, max_length, num_labels, grad_clipping, num_filters, p)
logger.info("Network: time=%d, hidden=%d, filter=%d, dropout=%s" % (num_time_units, num_units, num_filters, dropout))
# compute loss
num_tokens = mask_var.sum(dtype=theano.config.floatX)
num_tokens_nr = mask_nr_var.sum(dtype=theano.config.floatX)
# get output of bi-taru-cnn shape=[batch * max_length, #label]
prediction_train = lasagne.layers.get_output(network)
prediction_eval = lasagne.layers.get_output(network, deterministic=True)
final_prediction = T.argmax(prediction_eval, axis=1)
# flat target_var to vector
target_var_flatten = target_var.flatten() - 1
# flat mask_var to vector
mask_var_flatten = mask_var.flatten()
# flat mask_nr_var to vector
mask_nr_var_flatten = mask_nr_var.flatten()
# compute loss
# for training, we use mean of loss over number of labels
loss_train = lasagne.objectives.categorical_crossentropy(prediction_train, target_var_flatten)
loss_train = (loss_train * mask_var_flatten).sum(dtype=theano.config.floatX) / num_tokens
# l2 regularization?
if regular == 'l2':
l2_penalty = lasagne.regularization.regularize_network_params(network, lasagne.regularization.l2)
loss_train = loss_train + gamma * l2_penalty
loss_eval = lasagne.objectives.categorical_crossentropy(prediction_eval, target_var_flatten)
loss_eval = (loss_eval * mask_var_flatten).sum(dtype=theano.config.floatX) / num_tokens
# compute number of correct labels
corr_train = lasagne.objectives.categorical_accuracy(prediction_train, target_var_flatten)
corr_nr_train = (corr_train * mask_nr_var_flatten).sum(dtype=theano.config.floatX)
corr_train = (corr_train * mask_var_flatten).sum(dtype=theano.config.floatX)
corr_eval = lasagne.objectives.categorical_accuracy(prediction_eval, target_var_flatten)
corr_nr_eval = (corr_eval * mask_nr_var_flatten).sum(dtype=theano.config.floatX)
corr_eval = (corr_eval * mask_var_flatten).sum(dtype=theano.config.floatX)
params = lasagne.layers.get_all_params(network, trainable=True)
# updates = nesterov_momentum(loss_train, params=params, learning_rate=learning_rate, momentum=momentum)
updates = adam(loss_train, params=params, learning_rate=learning_rate, beta1=0.9, beta2=0.9)
# Compile a function performing a training step on a mini-batch
train_fn = theano.function([word_var, char_var, target_var, mask_var, mask_nr_var],
[loss_train, corr_train, corr_nr_train, num_tokens, num_tokens_nr], updates=updates)
# Compile a second function evaluating the loss and accuracy of network
eval_fn = theano.function([word_var, char_var, target_var, mask_var, mask_nr_var],
[corr_eval, corr_nr_eval, num_tokens, num_tokens_nr, final_prediction])
# Finally, launch the training loop.
logger.info(
"Start training: regularization: %s(%f), dropout: %s (#training data: %d, batch size: %d, clip: %.1f)..." \
% (regular, (0.0 if regular == 'none' else gamma), dropout, num_data, batch_size, grad_clipping))
num_batches = num_data / batch_size + 1
dev_correct = 0.0
dev_correct_nr = 0.0
best_epoch = 0
test_correct = 0.0
test_correct_nr = 0.0
test_total = 0
test_total_nr = 0
lr = learning_rate
for epoch in range(1, num_epochs + 1):
print 'Epoch %d (learning rate=%.4f, decay rate=%.4f): ' % (epoch, lr, decay_rate)
train_err = 0.0
train_corr = 0.0
train_corr_nr = 0.0
train_total = 0
train_total_nr = 0
train_inst = 0
start_time = time.time()
num_back = 0
for batch in xrange(1, num_batches + 1):
wids, cids, pids, _, _, masks = data_utils.get_batch(data_train, batch_size)
masks_nr = np.copy(masks)
masks_nr[:, 0] = 0
err, corr, corr_nr, num, num_nr = train_fn(wids, cids, pids, masks, masks_nr)
train_err += err * wids.shape[0]
train_corr += corr
train_corr_nr += corr_nr
train_total += num
train_total_nr += num_nr
train_inst += wids.shape[0]
time_ave = (time.time() - start_time) / batch
time_left = (num_batches - batch) * time_ave
# update log
sys.stdout.write("\b" * num_back)
log_info = 'train: %d/%d loss: %.4f, acc: %.2f%%, acc(no root): %.2f%%, time left (estimated): %.2fs' % (
batch, num_batches, train_err / train_inst, train_corr * 100 / train_total,
train_corr_nr * 100 / train_total_nr, time_left)
sys.stdout.write(log_info)
num_back = len(log_info)
# update training log after each epoch
assert train_inst == num_batches * batch_size
assert train_total == train_total_nr + train_inst
sys.stdout.write("\b" * num_back)
print 'train: %d/%d loss: %.4f, acc: %.2f%%, acc(no root): %.2f%%, time: %.2fs' % (
train_inst, train_inst, train_err / train_inst, train_corr * 100 / train_total,
train_corr_nr * 100 / train_total_nr, time.time() - start_time)
# evaluate performance on dev data
dev_corr = 0.0
dev_corr_nr = 0.0
dev_total = 0
dev_total_nr = 0
dev_inst = 0
for batch in data_utils.iterate_batch(data_dev, batch_size):
wids, cids, pids, _, _, masks = batch
masks_nr = np.copy(masks)
masks_nr[:, 0] = 0
corr, corr_nr, num, num_nr, predictions = eval_fn(wids, cids, pids, masks, masks_nr)
dev_corr += corr
dev_corr_nr += corr_nr
dev_total += num
dev_total_nr += num_nr
dev_inst += wids.shape[0]
assert dev_total == dev_total_nr + dev_inst
print 'dev corr: %d, total: %d, acc: %.2f%%, no root corr: %d, total: %d, acc: %.2f%%' % (
dev_corr, dev_total, dev_corr * 100 / dev_total, dev_corr_nr, dev_total_nr,
dev_corr_nr * 100 / dev_total_nr)
if dev_correct_nr < dev_corr_nr:
dev_correct = dev_corr
dev_correct_nr = dev_corr_nr
best_epoch = epoch
# evaluate on test data when better performance detected
test_corr = 0.0
test_corr_nr = 0.0
test_total = 0
test_total_nr = 0
test_inst = 0
for batch in data_utils.iterate_batch(data_test, batch_size):
wids, cids, pids, _, _, masks = batch
masks_nr = np.copy(masks)
masks_nr[:, 0] = 0
corr, corr_nr, num, num_nr, predictions = eval_fn(wids, cids, pids, masks, masks_nr)
test_corr += corr
test_corr_nr += corr_nr
test_total += num
test_total_nr += num_nr
test_inst += wids.shape[0]
assert test_total + test_total_nr + test_inst
test_correct = test_corr
test_correct_nr = test_corr_nr
print "best dev corr: %d, total: %d, acc: %.2f%%, no root corr: %d, total: %d, acc: %.2f%% (epoch: %d)" % (
dev_correct, dev_total, dev_correct * 100 / dev_total,
dev_correct_nr, dev_total_nr, dev_correct_nr * 100 / dev_total_nr, best_epoch)
print "best test corr: %d, total: %d, acc: %.2f%%, no root corr: %d, total: %d, acc: %.2f%% (epoch: %d)" % (
test_correct, test_total, test_correct * 100 / test_total,
test_correct_nr, test_total_nr, test_correct_nr * 100 / test_total_nr, best_epoch)
if epoch in schedule:
lr = lr * decay_rate
updates = adam(loss_train, params=params, learning_rate=lr, beta1=0.9, beta2=0.9)
train_fn = theano.function([word_var, char_var, target_var, mask_var, mask_nr_var],
[loss_train, corr_train, corr_nr_train, num_tokens, num_tokens_nr],
updates=updates)
# Script entry point: run training only when executed directly.
if __name__ == '__main__':
    main()
| mit |
Juniper/nova | nova/tests/unit/virt/xenapi/test_vmops.py | 2 | 98803 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
try:
import xmlrpclib
except ImportError:
import six.moves.xmlrpc_client as xmlrpclib
from eventlet import greenthread
import mock
from os_xenapi.client import host_xenstore
from os_xenapi.client import session as xenapi_session
import six
from nova.compute import power_state
from nova.compute import task_states
from nova import context
from nova import exception
from nova import objects
from nova.objects import fields
from nova.pci import manager as pci_manager
from nova import test
from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
from nova.tests.unit.virt.xenapi import stubs
from nova.tests import uuidsentinel as uuids
from nova import utils
from nova.virt import fake
from nova.virt.xenapi import agent as xenapi_agent
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import vmops
from nova.virt.xenapi import volume_utils
from nova.virt.xenapi import volumeops
class VMOpsTestBase(stubs.XenAPITestBaseNoDB):
    """Base fixture: a stubbed fake-XenAPI session, a VMOps instance, and
    automatic cleanup of any VMs a test creates via create_vm()."""

    def setUp(self):
        super(VMOpsTestBase, self).setUp()
        self._setup_mock_vmops()
        self.vms = []

    def _setup_mock_vmops(self, product_brand=None, product_version=None):
        """Point a fresh VMOps at a stubbed-out fake XenAPI session."""
        stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
        session = xenapi_session.XenAPISession('test_url', 'root',
                                               'test_pass')
        self._session = session
        self.vmops = vmops.VMOps(session, fake.FakeVirtAPI())

    def create_vm(self, name, state="Running"):
        """Create a fake VM, remember it for teardown, return (record, ref)."""
        ref = xenapi_fake.create_vm(name, state)
        self.vms.append(ref)
        record = xenapi_fake.get_record("VM", ref)
        return record, ref

    def tearDown(self):
        super(VMOpsTestBase, self).tearDown()
        # destroy every tracked VM, in creation order
        for tracked_ref in self.vms:
            xenapi_fake.destroy_vm(tracked_ref)
class VMOpsTestCase(VMOpsTestBase):
    """VMOps unit tests that use a bare attribute-bag session (no fake
    XenAPI), so every collaborator must be mocked explicitly."""
    def setUp(self):
        super(VMOpsTestCase, self).setUp()
        # Re-run setup: replaces the base class's fake-XenAPI session with
        # the minimal mock session built by the override below.
        self._setup_mock_vmops()
        self.context = context.RequestContext('user', 'project')
        self.instance = fake_instance.fake_instance_obj(self.context)
    def _setup_mock_vmops(self, product_brand=None, product_version=None):
        # Override of VMOpsTestBase._setup_mock_vmops: builds VMOps over a
        # plain mock object instead of a stubbed XenAPISession.
        self._session = self._get_mock_session(product_brand, product_version)
        self._vmops = vmops.VMOps(self._session, fake.FakeVirtAPI())
    def _get_mock_session(self, product_brand, product_version):
        # Bare object exposing only the two product_* attributes.
        class Mock(object):
            pass
        mock_session = Mock()
        mock_session.product_brand = product_brand
        mock_session.product_version = product_version
        return mock_session
    def _test_finish_revert_migration_after_crash(self, backup_made, new_made,
                                                  vm_shutdown=True):
        """Drive finish_revert_migration() for one crash scenario.

        :param backup_made: the 'foo-orig' backup VM exists
        :param new_made: the new 'foo' VM was created before the crash
        :param vm_shutdown: reverted VM is halted and must be restarted
        """
        instance = {'name': 'foo',
                    'task_state': task_states.RESIZE_MIGRATING}
        context = 'fake_context'
        # vm_utils.lookup is called for 'foo-orig' first, then 'foo';
        # derive its two fake return values from the scenario flags.
        lookup_returns = [backup_made and 'foo' or None,
                          (not backup_made or new_made) and 'foo' or None]
        # NOTE: decorator stack injects mocks bottom-up into test()'s args.
        @mock.patch.object(vm_utils, 'lookup',
                           side_effect=lookup_returns)
        @mock.patch.object(self._vmops, '_destroy')
        @mock.patch.object(vm_utils, 'set_vm_name_label')
        @mock.patch.object(self._vmops, '_attach_mapped_block_devices')
        @mock.patch.object(self._vmops, '_start')
        @mock.patch.object(vm_utils, 'is_vm_shutdown',
                           return_value=vm_shutdown)
        def test(mock_is_vm, mock_start, mock_attach_bdm, mock_set_vm_name,
                 mock_destroy, mock_lookup):
            self._vmops.finish_revert_migration(context, instance, [])
            mock_lookup.assert_has_calls([mock.call(self._session, 'foo-orig'),
                                          mock.call(self._session, 'foo')])
            if backup_made:
                if new_made:
                    mock_destroy.assert_called_once_with(instance, 'foo')
                mock_set_vm_name.assert_called_once_with(self._session, 'foo',
                                                         'foo')
                mock_attach_bdm.assert_called_once_with(instance, [])
            mock_is_vm.assert_called_once_with(self._session, 'foo')
            if vm_shutdown:
                mock_start.assert_called_once_with(instance, 'foo')
        test()
    def test_finish_revert_migration_after_crash(self):
        # crashed after both the backup rename and the new VM creation
        self._test_finish_revert_migration_after_crash(True, True)
    def test_finish_revert_migration_after_crash_before_new(self):
        # crashed after the backup rename, before the new VM existed
        self._test_finish_revert_migration_after_crash(True, False)
    def test_finish_revert_migration_after_crash_before_backup(self):
        # crashed before even the backup rename happened
        self._test_finish_revert_migration_after_crash(False, False)
    @mock.patch.object(vm_utils, 'lookup', return_value=None)
    def test_get_vm_opaque_ref_raises_instance_not_found(self, mock_lookup):
        # a failed lookup must surface as InstanceNotFound
        instance = {"name": "dummy"}
        self.assertRaises(exception.InstanceNotFound,
                          self._vmops._get_vm_opaque_ref, instance)
        mock_lookup.assert_called_once_with(self._session, instance['name'],
                                            False)
    @mock.patch.object(vm_utils, 'destroy_vm')
    @mock.patch.object(vm_utils, 'clean_shutdown_vm')
    @mock.patch.object(vm_utils, 'hard_shutdown_vm')
    def test_clean_shutdown_no_bdm_on_destroy(self, hard_shutdown_vm,
            clean_shutdown_vm, destroy_vm):
        # without attached block devices, hard shutdown is used directly
        vm_ref = 'vm_ref'
        self._vmops._destroy(self.instance, vm_ref, destroy_disks=False)
        hard_shutdown_vm.assert_called_once_with(self._vmops._session,
                self.instance, vm_ref)
        self.assertEqual(0, clean_shutdown_vm.call_count)
    @mock.patch.object(vm_utils, 'destroy_vm')
    @mock.patch.object(vm_utils, 'clean_shutdown_vm')
    @mock.patch.object(vm_utils, 'hard_shutdown_vm')
    def test_clean_shutdown_with_bdm_on_destroy(self, hard_shutdown_vm,
            clean_shutdown_vm, destroy_vm):
        # with attached block devices, a clean shutdown is attempted first
        vm_ref = 'vm_ref'
        block_device_info = {'block_device_mapping': ['fake']}
        self._vmops._destroy(self.instance, vm_ref, destroy_disks=False,
                block_device_info=block_device_info)
        clean_shutdown_vm.assert_called_once_with(self._vmops._session,
                self.instance, vm_ref)
        self.assertEqual(0, hard_shutdown_vm.call_count)
    @mock.patch.object(vm_utils, 'destroy_vm')
    @mock.patch.object(vm_utils, 'clean_shutdown_vm', return_value=False)
    @mock.patch.object(vm_utils, 'hard_shutdown_vm')
    def test_clean_shutdown_with_bdm_failed_on_destroy(self, hard_shutdown_vm,
            clean_shutdown_vm, destroy_vm):
        # if the clean shutdown fails, fall back to a hard shutdown
        vm_ref = 'vm_ref'
        block_device_info = {'block_device_mapping': ['fake']}
        self._vmops._destroy(self.instance, vm_ref, destroy_disks=False,
                block_device_info=block_device_info)
        clean_shutdown_vm.assert_called_once_with(self._vmops._session,
                self.instance, vm_ref)
        hard_shutdown_vm.assert_called_once_with(self._vmops._session,
                self.instance, vm_ref)
    @mock.patch.object(vm_utils, 'try_auto_configure_disk')
    @mock.patch.object(vm_utils, 'create_vbd',
                       side_effect=test.TestingException)
    def test_attach_disks_rescue_auto_disk_config_false(self, create_vbd,
                                                        try_auto_config):
        # auto_disk_config=false: root disk must not be auto-resized
        # (create_vbd raises just to stop _attach_disks after the check)
        ctxt = context.RequestContext('user', 'project')
        instance = fake_instance.fake_instance_obj(ctxt)
        image_meta = objects.ImageMeta.from_dict(
            {'properties': {'auto_disk_config': 'false'}})
        vdis = {'root': {'ref': 'fake-ref'}}
        self.assertRaises(test.TestingException, self._vmops._attach_disks,
                          ctxt, instance, image_meta=image_meta, vm_ref=None,
                          name_label=None, vdis=vdis, disk_image_type='fake',
                          network_info=[], rescue=True)
        self.assertFalse(try_auto_config.called)
    @mock.patch.object(vm_utils, 'try_auto_configure_disk')
    @mock.patch.object(vm_utils, 'create_vbd',
                       side_effect=test.TestingException)
    def test_attach_disks_rescue_auto_disk_config_true(self, create_vbd,
                                                       try_auto_config):
        # auto_disk_config=true: root disk is resized to the flavor size
        ctxt = context.RequestContext('user', 'project')
        instance = fake_instance.fake_instance_obj(ctxt)
        image_meta = objects.ImageMeta.from_dict(
            {'properties': {'auto_disk_config': 'true'}})
        vdis = {'root': {'ref': 'fake-ref'}}
        self.assertRaises(test.TestingException, self._vmops._attach_disks,
                          ctxt, instance, image_meta=image_meta, vm_ref=None,
                          name_label=None, vdis=vdis, disk_image_type='fake',
                          network_info=[], rescue=True)
        try_auto_config.assert_called_once_with(self._vmops._session,
            'fake-ref', instance.flavor.root_gb)
class InjectAutoDiskConfigTestCase(VMOpsTestBase):
    """Tests for writing the auto-disk-config flag into xenstore."""

    def _inject_and_check(self, auto_disk_config, expected):
        # Inject the flag for a fresh VM and verify the serialized value.
        vm, vm_ref = self.create_vm("dummy")
        instance = {"name": "dummy", "uuid": "1234",
                    "auto_disk_config": auto_disk_config}
        self.vmops._inject_auto_disk_config(instance, vm_ref)
        self.assertEqual(vm['xenstore_data']['vm-data/auto-disk-config'],
                         expected)

    def test_inject_auto_disk_config_when_present(self):
        self._inject_and_check(True, 'True')

    def test_inject_auto_disk_config_none_as_false(self):
        # None is serialized as 'False', not 'None'
        self._inject_and_check(None, 'False')
class GetConsoleOutputTestCase(VMOpsTestBase):
    """Tests for VMOps.get_console_output and the _get_dom_id helper."""

    def test_get_console_output_works(self):
        ctxt = context.RequestContext('user', 'project')
        fake_inst = fake_instance.fake_instance_obj(ctxt)
        with mock.patch.object(self.vmops, '_get_last_dom_id',
                               return_value=42) as mock_last_dom:
            self.assertEqual(b"dom_id: 42",
                             self.vmops.get_console_output(fake_inst))
            mock_last_dom.assert_called_once_with(fake_inst,
                                                  check_rescue=True)

    def test_get_console_output_not_available(self):
        ctxt = context.RequestContext('user', 'project')
        fake_inst = fake_instance.fake_instance_obj(ctxt)
        # dom_id=0 used to trigger exception in fake XenAPI
        with mock.patch.object(self.vmops, '_get_last_dom_id',
                               return_value=0) as mock_last_dom:
            self.assertRaises(exception.ConsoleNotAvailable,
                              self.vmops.get_console_output, fake_inst)
            mock_last_dom.assert_called_once_with(fake_inst,
                                                  check_rescue=True)

    def test_get_dom_id_works(self):
        record, _ = self.create_vm("dummy")
        self.assertEqual(record["domid"],
                         self.vmops._get_dom_id({"name": "dummy"}))

    def test_get_dom_id_works_with_rescue_vm(self):
        # the rescue VM is found under the '<name>-rescue' label
        record, _ = self.create_vm("dummy-rescue")
        self.assertEqual(record["domid"],
                         self.vmops._get_dom_id({"name": "dummy"},
                                                check_rescue=True))

    def test_get_dom_id_raises_not_found(self):
        # only a differently-named VM exists
        self.create_vm("not-dummy")
        self.assertRaises(exception.InstanceNotFound,
                          self.vmops._get_dom_id, {"name": "dummy"})

    def test_get_dom_id_works_with_vmref(self):
        record, ref = self.create_vm("dummy")
        self.assertEqual(record["domid"],
                         self.vmops._get_dom_id({'name': 'dummy'},
                                                vm_ref=ref))

    def test_get_dom_id_fails_if_shutdown(self):
        record, ref = self.create_vm("dummy")
        self._session.VM.hard_shutdown(ref)
        self.assertRaises(exception.InstanceNotFound,
                          self.vmops._get_dom_id, {'name': 'dummy'},
                          vm_ref=ref)
class SpawnTestCase(VMOpsTestBase):
def _stub_out_common(self):
self.mox.StubOutWithMock(self.vmops, '_ensure_instance_name_unique')
self.mox.StubOutWithMock(self.vmops, '_ensure_enough_free_mem')
self.mox.StubOutWithMock(self.vmops, '_update_instance_progress')
self.mox.StubOutWithMock(vm_utils, 'determine_disk_image_type')
self.mox.StubOutWithMock(self.vmops, '_get_vdis_for_instance')
self.mox.StubOutWithMock(vm_utils, 'safe_destroy_vdis')
self.mox.StubOutWithMock(self.vmops._volumeops,
'safe_cleanup_from_vdis')
self.mox.StubOutWithMock(self.vmops, '_resize_up_vdis')
self.mox.StubOutWithMock(vm_utils,
'create_kernel_and_ramdisk')
self.mox.StubOutWithMock(vm_utils, 'destroy_kernel_ramdisk')
self.mox.StubOutWithMock(self.vmops, '_create_vm_record')
self.mox.StubOutWithMock(self.vmops, '_destroy')
self.mox.StubOutWithMock(self.vmops, '_attach_disks')
self.mox.StubOutWithMock(self.vmops, '_save_device_metadata')
self.mox.StubOutWithMock(self.vmops, '_prepare_disk_metadata')
self.mox.StubOutWithMock(pci_manager, 'get_instance_pci_devs')
self.mox.StubOutWithMock(vm_utils, 'set_other_config_pci')
self.mox.StubOutWithMock(self.vmops, '_attach_orig_disks')
self.mox.StubOutWithMock(self.vmops, 'inject_network_info')
self.mox.StubOutWithMock(self.vmops, '_inject_hostname')
self.mox.StubOutWithMock(self.vmops, '_inject_instance_metadata')
self.mox.StubOutWithMock(self.vmops, '_inject_auto_disk_config')
self.mox.StubOutWithMock(self.vmops, '_file_inject_vm_settings')
self.mox.StubOutWithMock(self.vmops, '_create_vifs')
self.mox.StubOutWithMock(self.vmops.firewall_driver,
'setup_basic_filtering')
self.mox.StubOutWithMock(self.vmops.firewall_driver,
'prepare_instance_filter')
self.mox.StubOutWithMock(self.vmops, '_start')
self.mox.StubOutWithMock(self.vmops, '_wait_for_instance_to_start')
self.mox.StubOutWithMock(self.vmops,
'_configure_new_instance_with_agent')
self.mox.StubOutWithMock(self.vmops, '_remove_hostname')
self.mox.StubOutWithMock(self.vmops.firewall_driver,
'apply_instance_filter')
self.mox.StubOutWithMock(self.vmops, '_update_last_dom_id')
self.mox.StubOutWithMock(self.vmops._session, 'call_xenapi')
@staticmethod
def _new_instance(obj):
class _Instance(dict):
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
return _Instance(**obj)
    def _test_spawn(self, name_label_param=None, block_device_info_param=None,
                    rescue=False, include_root_vdi=True, throw_exception=None,
                    attach_pci_dev=False, neutron_exception=False,
                    network_info=None):
        """Record the full mox expectation script for VMOps.spawn, then run it.

        The keyword flags select which spawn code path gets scripted (rescue,
        PCI passthrough, neutron event waiting, rollback on failure).
        NOTE: mox replay is order sensitive, so the expectations below must
        stay in exactly the order the production code makes the calls.
        """
        self._stub_out_common()
        # Fake instance; _new_instance gives dict-with-attribute access.
        instance = self._new_instance({"name": "dummy", "uuid": "fake_uuid",
                                       "device_metadata": None})
        name_label = name_label_param
        if name_label is None:
            name_label = "dummy"
        image_meta = objects.ImageMeta.from_dict({"id": uuids.image_id})
        context = "context"
        session = self.vmops._session
        injected_files = "fake_files"
        admin_password = "password"
        if network_info is None:
            network_info = []
        # spawn reports 10 progress steps; the rescue path adds one more.
        steps = 10
        if rescue:
            steps += 1
        block_device_info = block_device_info_param
        if block_device_info and not block_device_info['root_device_name']:
            # spawn fills in a missing root device name with the default.
            block_device_info = dict(block_device_info_param)
            block_device_info['root_device_name'] = \
                self.vmops.default_root_dev
        di_type = "di_type"
        vm_utils.determine_disk_image_type(image_meta).AndReturn(di_type)
        step = 1
        self.vmops._update_instance_progress(context, instance, step, steps)
        # "other" VDI is volume-backed (osvol) and must be cleaned up, not
        # destroyed, on rollback; the root VDI is only present when asked for.
        vdis = {"other": {"ref": "fake_ref_2", "osvol": True}}
        if include_root_vdi:
            vdis["root"] = {"ref": "fake_ref"}
        self.vmops._get_vdis_for_instance(context, instance,
                name_label, image_meta, di_type,
                block_device_info).AndReturn(vdis)
        self.vmops._resize_up_vdis(instance, vdis)
        step += 1
        self.vmops._update_instance_progress(context, instance, step, steps)
        kernel_file = "kernel"
        ramdisk_file = "ramdisk"
        vm_utils.create_kernel_and_ramdisk(context, session,
                instance, name_label).AndReturn((kernel_file, ramdisk_file))
        step += 1
        self.vmops._update_instance_progress(context, instance, step, steps)
        vm_ref = "fake_vm_ref"
        self.vmops._ensure_instance_name_unique(name_label)
        self.vmops._ensure_enough_free_mem(instance)
        self.vmops._create_vm_record(context, instance, name_label,
                di_type, kernel_file,
                ramdisk_file, image_meta, rescue).AndReturn(vm_ref)
        step += 1
        self.vmops._update_instance_progress(context, instance, step, steps)
        self.vmops._save_device_metadata(context, instance, block_device_info)
        self.vmops._attach_disks(context, instance, image_meta, vm_ref,
                            name_label, vdis, di_type, network_info, rescue,
                            admin_password, injected_files)
        if attach_pci_dev:
            # Minimal PCI device record; only enough fields for
            # set_other_config_pci to consume.
            fake_dev = {
                'created_at': None,
                'updated_at': None,
                'deleted_at': None,
                'deleted': None,
                'id': 1,
                'compute_node_id': 1,
                'address': '00:00.0',
                'vendor_id': '1234',
                'product_id': 'abcd',
                'dev_type': fields.PciDeviceType.STANDARD,
                'status': 'available',
                'dev_id': 'devid',
                'label': 'label',
                'instance_uuid': None,
                'extra_info': '{}',
                }
            pci_manager.get_instance_pci_devs(instance).AndReturn([fake_dev])
            vm_utils.set_other_config_pci(self.vmops._session,
                                          vm_ref,
                                          "0/0000:00:00.0")
        else:
            pci_manager.get_instance_pci_devs(instance).AndReturn([])
        step += 1
        self.vmops._update_instance_progress(context, instance, step, steps)
        self.vmops._inject_instance_metadata(instance, vm_ref)
        self.vmops._inject_auto_disk_config(instance, vm_ref)
        self.vmops._inject_hostname(instance, vm_ref, rescue)
        self.vmops._file_inject_vm_settings(instance, vm_ref, vdis,
                                            network_info)
        self.vmops.inject_network_info(instance, network_info, vm_ref)
        step += 1
        self.vmops._update_instance_progress(context, instance, step, steps)
        if neutron_exception:
            # Script the neutron vif-plugged wait raising before VIF creation.
            events = [('network-vif-plugged', 1)]
            self.vmops._get_neutron_events(network_info,
                                           True, True, False).AndReturn(events)
            self.mox.StubOutWithMock(self.vmops, '_neutron_failed_callback')
            self.mox.StubOutWithMock(self.vmops._virtapi,
                                     'wait_for_instance_event')
            self.vmops._virtapi.wait_for_instance_event(instance, events,
                deadline=300,
                error_callback=self.vmops._neutron_failed_callback).\
                AndRaise(exception.VirtualInterfaceCreateException)
        else:
            self.vmops._create_vifs(instance, vm_ref, network_info)
            # setup_basic_filtering raising NotImplementedError is the normal
            # path for drivers that don't implement it; spawn tolerates it.
            self.vmops.firewall_driver.setup_basic_filtering(instance,
                    network_info).AndRaise(NotImplementedError)
            self.vmops.firewall_driver.prepare_instance_filter(instance,
                                                               network_info)
            step += 1
            self.vmops._update_instance_progress(context, instance,
                                                 step, steps)
            if rescue:
                self.vmops._attach_orig_disks(instance, vm_ref)
                step += 1
                self.vmops._update_instance_progress(context, instance, step,
                                                     steps)
            start_pause = True
            self.vmops._start(instance, vm_ref, start_pause=start_pause)
            step += 1
            self.vmops._update_instance_progress(context, instance,
                                                 step, steps)
            self.vmops.firewall_driver.apply_instance_filter(instance,
                                                             network_info)
            step += 1
            self.vmops._update_instance_progress(context, instance,
                                                 step, steps)
            self.vmops._session.call_xenapi('VM.unpause', vm_ref)
            self.vmops._wait_for_instance_to_start(instance, vm_ref)
            self.vmops._update_last_dom_id(vm_ref)
            self.vmops._configure_new_instance_with_agent(instance, vm_ref,
                    injected_files, admin_password)
            self.vmops._remove_hostname(instance, vm_ref)
            step += 1
            last_call = self.vmops._update_instance_progress(context, instance,
                                                             step, steps)
        if throw_exception:
            last_call.AndRaise(throw_exception)
        if throw_exception or neutron_exception:
            # Rollback sequence: destroy the VM, the kernel/ramdisk pair,
            # the root VDI, and clean up (not destroy) the volume-backed VDI.
            self.vmops._destroy(instance, vm_ref, network_info=network_info)
            vm_utils.destroy_kernel_ramdisk(self.vmops._session, instance,
                                            kernel_file, ramdisk_file)
            vm_utils.safe_destroy_vdis(self.vmops._session, ["fake_ref"])
            self.vmops._volumeops.safe_cleanup_from_vdis(["fake_ref_2"])
        self.mox.ReplayAll()
        self.vmops.spawn(context, instance, image_meta, injected_files,
                         admin_password, network_info,
                         block_device_info_param, name_label_param, rescue)
    def test_spawn(self):
        """Exercise spawn along its default happy path."""
        self._test_spawn()
    def test_spawn_with_alternate_options(self):
        """Rescue spawn with no root VDI, a custom name label and a BDM
        whose root device name must be defaulted by spawn."""
        self._test_spawn(include_root_vdi=False, rescue=True,
                         name_label_param="bob",
                         block_device_info_param={"root_device_name": ""})
    def test_spawn_with_pci_available_on_the_host(self):
        """spawn records the passthrough PCI device in the VM's other-config."""
        self._test_spawn(attach_pci_dev=True)
    def test_spawn_performs_rollback_and_throws_exception(self):
        """A failure at the final progress step triggers spawn's rollback
        and the original exception is re-raised to the caller."""
        self.assertRaises(test.TestingException, self._test_spawn,
                          throw_exception=test.TestingException())
    def test_spawn_with_neutron(self):
        """With neutron enabled, spawn asks for vif-plugged events to wait on."""
        self.flags(use_neutron=True)
        self.mox.StubOutWithMock(self.vmops, '_get_neutron_events')
        events = [('network-vif-plugged', 1)]
        network_info = [{'id': 1, 'active': True}]
        # Stubs must be recorded before _test_spawn calls ReplayAll.
        self.vmops._get_neutron_events(network_info,
                                       True, True, False).AndReturn(events)
        self.mox.StubOutWithMock(self.vmops,
                                 '_neutron_failed_callback')
        self._test_spawn(network_info=network_info)
@staticmethod
def _dev_mock(obj):
dev = mock.MagicMock(**obj)
dev.__contains__.side_effect = (
lambda attr: getattr(dev, attr, None) is not None)
return dev
    @mock.patch.object(objects, 'XenDeviceBus')
    @mock.patch.object(objects, 'IDEDeviceBus')
    @mock.patch.object(objects, 'DiskMetadata')
    def test_prepare_disk_metadata(self, mock_DiskMetadata,
            mock_IDEDeviceBus, mock_XenDeviceBus):
        """_prepare_disk_metadata emits one entry per bus alias of a disk.

        Per the expectations below: xvda/xvdc produce an IDE address plus
        three Xen address forms, while a later disk (xvde) gets only two
        Xen addresses — presumably because it is out of IDE range (TODO
        confirm against vmops._prepare_disk_metadata).
        """
        # Make the bus/metadata constructors return inspectable fakes
        # carrying the keyword arguments they were built with.
        mock_IDEDeviceBus.side_effect = \
            lambda **kw: \
            self._dev_mock({"address": kw.get("address"), "bus": "ide"})
        mock_XenDeviceBus.side_effect = \
            lambda **kw: \
            self._dev_mock({"address": kw.get("address"), "bus": "xen"})
        mock_DiskMetadata.side_effect = \
            lambda **kw: self._dev_mock(dict(**kw))
        bdm = self._dev_mock({"device_name": "/dev/xvda", "tag": "disk_a"})
        disk_metadata = self.vmops._prepare_disk_metadata(bdm)
        self.assertEqual(disk_metadata[0].tags, ["disk_a"])
        self.assertEqual(disk_metadata[0].bus.bus, "ide")
        self.assertEqual(disk_metadata[0].bus.address, "0:0")
        self.assertEqual(disk_metadata[1].tags, ["disk_a"])
        self.assertEqual(disk_metadata[1].bus.bus, "xen")
        self.assertEqual(disk_metadata[1].bus.address, "000000")
        self.assertEqual(disk_metadata[2].tags, ["disk_a"])
        self.assertEqual(disk_metadata[2].bus.bus, "xen")
        self.assertEqual(disk_metadata[2].bus.address, "51712")
        self.assertEqual(disk_metadata[3].tags, ["disk_a"])
        self.assertEqual(disk_metadata[3].bus.bus, "xen")
        self.assertEqual(disk_metadata[3].bus.address, "768")
        bdm = self._dev_mock({"device_name": "/dev/xvdc", "tag": "disk_c"})
        disk_metadata = self.vmops._prepare_disk_metadata(bdm)
        self.assertEqual(disk_metadata[0].tags, ["disk_c"])
        self.assertEqual(disk_metadata[0].bus.bus, "ide")
        self.assertEqual(disk_metadata[0].bus.address, "1:0")
        self.assertEqual(disk_metadata[1].tags, ["disk_c"])
        self.assertEqual(disk_metadata[1].bus.bus, "xen")
        self.assertEqual(disk_metadata[1].bus.address, "000200")
        self.assertEqual(disk_metadata[2].tags, ["disk_c"])
        self.assertEqual(disk_metadata[2].bus.bus, "xen")
        self.assertEqual(disk_metadata[2].bus.address, "51744")
        self.assertEqual(disk_metadata[3].tags, ["disk_c"])
        self.assertEqual(disk_metadata[3].bus.bus, "xen")
        self.assertEqual(disk_metadata[3].bus.address, "5632")
        bdm = self._dev_mock({"device_name": "/dev/xvde", "tag": "disk_e"})
        disk_metadata = self.vmops._prepare_disk_metadata(bdm)
        self.assertEqual(disk_metadata[0].tags, ["disk_e"])
        self.assertEqual(disk_metadata[0].bus.bus, "xen")
        self.assertEqual(disk_metadata[0].bus.address, "000400")
        self.assertEqual(disk_metadata[1].tags, ["disk_e"])
        self.assertEqual(disk_metadata[1].bus.bus, "xen")
        self.assertEqual(disk_metadata[1].bus.address, "51776")
    @mock.patch.object(objects.VirtualInterfaceList, 'get_by_instance_uuid')
    @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
    @mock.patch.object(objects, 'NetworkInterfaceMetadata')
    @mock.patch.object(objects, 'InstanceDeviceMetadata')
    @mock.patch.object(objects, 'PCIDeviceBus')
    @mock.patch.object(vmops.VMOps, '_prepare_disk_metadata')
    def test_save_device_metadata(self, mock_prepare_disk_metadata,
            mock_PCIDeviceBus, mock_InstanceDeviceMetadata,
            mock_NetworkInterfaceMetadata, mock_get_bdms, mock_get_vifs):
        """Tagged VIFs and BDMs are both collected into the device metadata."""
        context = {}
        instance = {"uuid": "fake_uuid"}
        # One tagged VIF and one tagged BDM; both should appear in the result.
        vif = self._dev_mock({"address": "fake_address", "tag": "vif_tag"})
        bdm = self._dev_mock({"device_name": "/dev/xvdx", "tag": "bdm_tag"})
        block_device_info = {'block_device_mapping': [bdm]}
        mock_get_vifs.return_value = [vif]
        mock_get_bdms.return_value = [bdm]
        mock_InstanceDeviceMetadata.side_effect = \
            lambda **kw: {"devices": kw.get("devices")}
        mock_NetworkInterfaceMetadata.return_value = mock.sentinel.vif_metadata
        mock_prepare_disk_metadata.return_value = [mock.sentinel.bdm_metadata]
        dev_meta = self.vmops._save_device_metadata(context, instance,
                                                    block_device_info)
        mock_get_vifs.assert_called_once_with(context, instance["uuid"])
        mock_NetworkInterfaceMetadata.assert_called_once_with(mac=vif.address,
                bus=mock_PCIDeviceBus.return_value,
                tags=[vif.tag])
        mock_prepare_disk_metadata.assert_called_once_with(bdm)
        self.assertEqual(dev_meta["devices"],
                [mock.sentinel.vif_metadata, mock.sentinel.bdm_metadata])
    @mock.patch.object(objects.VirtualInterfaceList, 'get_by_instance_uuid')
    @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
    @mock.patch.object(vmops.VMOps, '_prepare_disk_metadata')
    def test_save_device_metadata_no_vifs_no_bdms(
            self, mock_prepare_disk_metadata, mock_get_bdms, mock_get_vifs):
        """Tests that we don't save any device metadata when there are no
        VIFs or BDMs.
        """
        ctxt = context.RequestContext('fake-user', 'fake-project')
        instance = objects.Instance(uuid=uuids.instance_uuid)
        block_device_info = {'block_device_mapping': []}
        mock_get_vifs.return_value = objects.VirtualInterfaceList()
        dev_meta = self.vmops._save_device_metadata(
            ctxt, instance, block_device_info)
        self.assertIsNone(dev_meta)
        # With an empty VIF list the BDM query should be short-circuited.
        mock_get_vifs.assert_called_once_with(ctxt, uuids.instance_uuid)
        mock_get_bdms.assert_not_called()
        mock_prepare_disk_metadata.assert_not_called()
    def test_spawn_with_neutron_exception(self):
        """A failed vif-plugged wait rolls spawn back and surfaces as
        VirtualInterfaceCreateException."""
        self.mox.StubOutWithMock(self.vmops, '_get_neutron_events')
        self.assertRaises(exception.VirtualInterfaceCreateException,
                          self._test_spawn, neutron_exception=True)
    def _test_finish_migration(self, power_on=True, resize_instance=True,
                               throw_exception=None, booted_from_volume=False):
        """Record the mox expectation script for VMOps.finish_migration.

        Covers powered-on/off, resize-up and boot-from-volume paths, plus
        the rollback sequence when the final progress update raises.
        NOTE: mox replay is order sensitive; keep expectations in call order.
        """
        self._stub_out_common()
        self.mox.StubOutWithMock(volumeops.VolumeOps, "connect_volume")
        self.mox.StubOutWithMock(vm_utils, "import_all_migrated_disks")
        self.mox.StubOutWithMock(self.vmops, "_attach_mapped_block_devices")
        context = "context"
        migration = {}
        name_label = "dummy"
        instance = self._new_instance({"name": name_label, "uuid": "fake_uuid",
                "root_device_name": "/dev/xvda", "device_metadata": None})
        disk_info = "disk_info"
        network_info = "net_info"
        image_meta = objects.ImageMeta.from_dict({"id": uuids.image_id})
        block_device_info = {}
        import_root = True
        if booted_from_volume:
            # Boot volume gets connected instead of importing a root disk.
            block_device_info = {'block_device_mapping': [
                {'mount_device': '/dev/xvda',
                 'connection_info': {'data': 'fake-data'}}]}
            import_root = False
            volumeops.VolumeOps.connect_volume(
                {'data': 'fake-data'}).AndReturn(('sr', 'vol-vdi-uuid'))
            self.vmops._session.call_xenapi('VDI.get_by_uuid',
                    'vol-vdi-uuid').AndReturn('vol-vdi-ref')
        session = self.vmops._session
        self.vmops._ensure_instance_name_unique(name_label)
        self.vmops._ensure_enough_free_mem(instance)
        di_type = "di_type"
        vm_utils.determine_disk_image_type(image_meta).AndReturn(di_type)
        root_vdi = {"ref": "fake_ref"}
        ephemeral_vdi = {"ref": "fake_ref_e"}
        vdis = {"root": root_vdi, "ephemerals": {4: ephemeral_vdi}}
        vm_utils.import_all_migrated_disks(self.vmops._session, instance,
                import_root=import_root).AndReturn(vdis)
        kernel_file = "kernel"
        ramdisk_file = "ramdisk"
        vm_utils.create_kernel_and_ramdisk(context, session,
                instance, name_label).AndReturn((kernel_file, ramdisk_file))
        vm_ref = "fake_vm_ref"
        rescue = False
        self.vmops._create_vm_record(context, instance, name_label,
                di_type, kernel_file,
                ramdisk_file, image_meta, rescue).AndReturn(vm_ref)
        if resize_instance:
            self.vmops._resize_up_vdis(instance, vdis)
        self.vmops._save_device_metadata(context, instance, block_device_info)
        self.vmops._attach_disks(context, instance, image_meta, vm_ref,
                            name_label, vdis, di_type, network_info, False,
                            None, None)
        self.vmops._attach_mapped_block_devices(instance, block_device_info)
        pci_manager.get_instance_pci_devs(instance).AndReturn([])
        self.vmops._inject_instance_metadata(instance, vm_ref)
        self.vmops._inject_auto_disk_config(instance, vm_ref)
        self.vmops._file_inject_vm_settings(instance, vm_ref, vdis,
                                            network_info)
        self.vmops.inject_network_info(instance, network_info, vm_ref)
        self.vmops._create_vifs(instance, vm_ref, network_info)
        # NotImplementedError from setup_basic_filtering is tolerated.
        self.vmops.firewall_driver.setup_basic_filtering(instance,
                network_info).AndRaise(NotImplementedError)
        self.vmops.firewall_driver.prepare_instance_filter(instance,
                                                           network_info)
        if power_on:
            self.vmops._start(instance, vm_ref, start_pause=True)
        self.vmops.firewall_driver.apply_instance_filter(instance,
                                                         network_info)
        if power_on:
            self.vmops._session.call_xenapi('VM.unpause', vm_ref)
            self.vmops._wait_for_instance_to_start(instance, vm_ref)
            self.vmops._update_last_dom_id(vm_ref)
        last_call = self.vmops._update_instance_progress(context, instance,
                                                         step=5, total_steps=5)
        if throw_exception:
            # Rollback: destroy the VM, kernel/ramdisk and imported VDIs.
            last_call.AndRaise(throw_exception)
            self.vmops._destroy(instance, vm_ref, network_info=network_info)
            vm_utils.destroy_kernel_ramdisk(self.vmops._session, instance,
                                            kernel_file, ramdisk_file)
            vm_utils.safe_destroy_vdis(self.vmops._session,
                                       ["fake_ref_e", "fake_ref"])
        self.mox.ReplayAll()
        self.vmops.finish_migration(context, migration, instance, disk_info,
                                    network_info, image_meta, resize_instance,
                                    block_device_info, power_on)
    def test_finish_migration(self):
        """Exercise finish_migration along its default happy path."""
        self._test_finish_migration()
    def test_finish_migration_no_power_on(self):
        """finish_migration skips start/unpause when power_on is False."""
        self._test_finish_migration(power_on=False, resize_instance=False)
    def test_finish_migration_booted_from_volume(self):
        """Boot-from-volume migration connects the volume instead of
        importing a root disk."""
        self._test_finish_migration(booted_from_volume=True)
    def test_finish_migrate_performs_rollback_on_error(self):
        """An error at the final progress step triggers the rollback path."""
        self.assertRaises(test.TestingException, self._test_finish_migration,
                          power_on=False, resize_instance=False,
                          throw_exception=test.TestingException())
    def test_remove_hostname(self):
        """_remove_hostname clears the xenstore hostname key on the VM."""
        vm, vm_ref = self.create_vm("dummy")
        instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None}
        self.mox.StubOutWithMock(self._session, 'call_xenapi')
        self._session.call_xenapi("VM.remove_from_xenstore_data", vm_ref,
                                  "vm-data/hostname")
        self.mox.ReplayAll()
        self.vmops._remove_hostname(instance, vm_ref)
        self.mox.VerifyAll()
    def test_reset_network(self):
        """reset_network re-injects the hostname and calls the guest agent's
        resetnetwork when the agent is enabled."""
        class mock_agent(object):
            # Minimal agent stand-in that records resetnetwork() being called.
            def __init__(self):
                self.called = False
            def resetnetwork(self):
                self.called = True
        vm, vm_ref = self.create_vm("dummy")
        instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None}
        agent = mock_agent()
        self.mox.StubOutWithMock(self.vmops, 'agent_enabled')
        self.mox.StubOutWithMock(self.vmops, '_get_agent')
        self.mox.StubOutWithMock(self.vmops, '_inject_hostname')
        self.mox.StubOutWithMock(self.vmops, '_remove_hostname')
        self.vmops.agent_enabled(instance).AndReturn(True)
        self.vmops._get_agent(instance, vm_ref).AndReturn(agent)
        self.vmops._inject_hostname(instance, vm_ref, False)
        self.vmops._remove_hostname(instance, vm_ref)
        self.mox.ReplayAll()
        self.vmops.reset_network(instance)
        self.assertTrue(agent.called)
        self.mox.VerifyAll()
    def test_inject_hostname(self):
        """The plain hostname is written to the VM's xenstore params."""
        instance = {"hostname": "dummy", "os_type": "fake", "uuid": "uuid"}
        vm_ref = "vm_ref"
        self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore')
        self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname', 'dummy')
        self.mox.ReplayAll()
        self.vmops._inject_hostname(instance, vm_ref, rescue=False)
    def test_inject_hostname_with_rescue_prefix(self):
        """In rescue mode the hostname gets a RESCUE- prefix."""
        instance = {"hostname": "dummy", "os_type": "fake", "uuid": "uuid"}
        vm_ref = "vm_ref"
        self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore')
        self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname',
                                          'RESCUE-dummy')
        self.mox.ReplayAll()
        self.vmops._inject_hostname(instance, vm_ref, rescue=True)
    def test_inject_hostname_with_windows_name_truncation(self):
        """Windows hostnames are truncated to 15 characters (NetBIOS limit —
        per the expected value below), here after the rescue prefix."""
        instance = {"hostname": "dummydummydummydummydummy",
                    "os_type": "windows", "uuid": "uuid"}
        vm_ref = "vm_ref"
        self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore')
        self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname',
                                          'RESCUE-dummydum')
        self.mox.ReplayAll()
        self.vmops._inject_hostname(instance, vm_ref, rescue=True)
    def test_wait_for_instance_to_start(self):
        """Power state is polled, sleeping between polls, until RUNNING."""
        instance = {"uuid": "uuid"}
        vm_ref = "vm_ref"
        self.mox.StubOutWithMock(vm_utils, 'get_power_state')
        self.mox.StubOutWithMock(greenthread, 'sleep')
        # First poll sees SHUTDOWN, so the loop sleeps once; second poll
        # sees RUNNING and the wait returns.
        vm_utils.get_power_state(self._session, vm_ref).AndReturn(
                                            power_state.SHUTDOWN)
        greenthread.sleep(0.5)
        vm_utils.get_power_state(self._session, vm_ref).AndReturn(
                                            power_state.RUNNING)
        self.mox.ReplayAll()
        self.vmops._wait_for_instance_to_start(instance, vm_ref)
    @mock.patch.object(vm_utils, 'lookup', return_value='ref')
    @mock.patch.object(vm_utils, 'create_vbd')
    def test_attach_orig_disks(self, mock_create_vbd, mock_lookup):
        """_attach_orig_disks looks up the original VM and attaches its root
        VDI to the rescue VM at the rescue device position, not bootable."""
        instance = {"name": "dummy"}
        vm_ref = "vm_ref"
        vbd_refs = {vmops.DEVICE_ROOT: "vdi_ref"}
        with mock.patch.object(self.vmops, '_find_vdi_refs',
                               return_value=vbd_refs) as mock_find_vdi:
            self.vmops._attach_orig_disks(instance, vm_ref)
            mock_lookup.assert_called_once_with(self.vmops._session, 'dummy')
            # Volume-backed VDIs are excluded from re-attachment.
            mock_find_vdi.assert_called_once_with('ref', exclude_volumes=True)
            mock_create_vbd.assert_called_once_with(
                self.vmops._session, vm_ref, 'vdi_ref', vmops.DEVICE_RESCUE,
                bootable=False)
    def test_agent_update_setup(self):
        # agent updates need to occur after networking is configured
        """The agent is queried, network reset, then updated in that order."""
        instance = {'name': 'betelgeuse',
                    'uuid': '1-2-3-4-5-6'}
        vm_ref = 'vm_ref'
        agent = xenapi_agent.XenAPIBasedAgent(self.vmops._session,
                self.vmops._virtapi, instance, vm_ref)
        self.mox.StubOutWithMock(xenapi_agent, 'should_use_agent')
        self.mox.StubOutWithMock(self.vmops, '_get_agent')
        self.mox.StubOutWithMock(agent, 'get_version')
        self.mox.StubOutWithMock(agent, 'resetnetwork')
        self.mox.StubOutWithMock(agent, 'update_if_needed')
        # Expected call order: version check, network reset, then update.
        xenapi_agent.should_use_agent(instance).AndReturn(True)
        self.vmops._get_agent(instance, vm_ref).AndReturn(agent)
        agent.get_version().AndReturn('1.2.3')
        agent.resetnetwork()
        agent.update_if_needed('1.2.3')
        self.mox.ReplayAll()
        self.vmops._configure_new_instance_with_agent(instance, vm_ref,
                                                      None, None)
@mock.patch.object(utils, 'is_neutron', return_value=True)
def test_get_neutron_event(self, mock_is_neutron):
network_info = [{"active": False, "id": 1},
{"active": True, "id": 2},
{"active": False, "id": 3},
{"id": 4}]
power_on = True
first_boot = True
rescue = False
events = self.vmops._get_neutron_events(network_info,
power_on, first_boot, rescue)
self.assertEqual("network-vif-plugged", events[0][0])
self.assertEqual(1, events[0][1])
self.assertEqual("network-vif-plugged", events[1][0])
self.assertEqual(3, events[1][1])
@mock.patch.object(utils, 'is_neutron', return_value=False)
def test_get_neutron_event_not_neutron_network(self, mock_is_neutron):
network_info = [{"active": False, "id": 1},
{"active": True, "id": 2},
{"active": False, "id": 3},
{"id": 4}]
power_on = True
first_boot = True
rescue = False
events = self.vmops._get_neutron_events(network_info,
power_on, first_boot, rescue)
self.assertEqual([], events)
@mock.patch.object(utils, 'is_neutron', return_value=True)
def test_get_neutron_event_power_off(self, mock_is_neutron):
network_info = [{"active": False, "id": 1},
{"active": True, "id": 2},
{"active": False, "id": 3},
{"id": 4}]
power_on = False
first_boot = True
rescue = False
events = self.vmops._get_neutron_events(network_info,
power_on, first_boot, rescue)
self.assertEqual([], events)
@mock.patch.object(utils, 'is_neutron', return_value=True)
def test_get_neutron_event_not_first_boot(self, mock_is_neutron):
network_info = [{"active": False, "id": 1},
{"active": True, "id": 2},
{"active": False, "id": 3},
{"id": 4}]
power_on = True
first_boot = False
rescue = False
events = self.vmops._get_neutron_events(network_info,
power_on, first_boot, rescue)
self.assertEqual([], events)
@mock.patch.object(utils, 'is_neutron', return_value=True)
def test_get_neutron_event_rescue(self, mock_is_neutron):
network_info = [{"active": False, "id": 1},
{"active": True, "id": 2},
{"active": False, "id": 3},
{"id": 4}]
power_on = True
first_boot = True
rescue = True
events = self.vmops._get_neutron_events(network_info,
power_on, first_boot, rescue)
self.assertEqual([], events)
class DestroyTestCase(VMOpsTestBase):
    """Tests for VMOps.destroy, focusing on cleanup of orphaned volume SRs
    when no VM record exists for the instance.
    """
    def setUp(self):
        super(DestroyTestCase, self).setUp()
        self.context = context.RequestContext(user_id=None, project_id=None)
        self.instance = fake_instance.fake_instance_obj(self.context)
    @mock.patch.object(vm_utils, 'lookup', side_effect=[None, None])
    @mock.patch.object(vm_utils, 'hard_shutdown_vm')
    @mock.patch.object(volume_utils, 'find_sr_by_uuid')
    @mock.patch.object(volume_utils, 'forget_sr')
    def test_no_vm_no_bdm(self, forget_sr, find_sr_by_uuid, hard_shutdown_vm,
                          lookup):
        """No VM and no BDMs: nothing is shut down and no SR is touched."""
        self.vmops.destroy(self.instance, 'network_info',
                           {'block_device_mapping': []})
        self.assertEqual(0, find_sr_by_uuid.call_count)
        self.assertEqual(0, forget_sr.call_count)
        self.assertEqual(0, hard_shutdown_vm.call_count)
    @mock.patch.object(vm_utils, 'lookup', side_effect=[None, None])
    @mock.patch.object(vm_utils, 'hard_shutdown_vm')
    @mock.patch.object(volume_utils, 'find_sr_by_uuid', return_value=None)
    @mock.patch.object(volume_utils, 'forget_sr')
    def test_no_vm_orphaned_volume_no_sr(self, forget_sr, find_sr_by_uuid,
                                         hard_shutdown_vm, lookup):
        """Orphaned volume whose SR is already gone: lookup only, no forget."""
        self.vmops.destroy(self.instance, 'network_info',
                           {'block_device_mapping': [{'connection_info':
                                {'data': {'volume_id': 'fake-uuid'}}}]})
        # 'FA15E-D15C-' appears to be a fixed SR-uuid prefix derived from the
        # volume id — TODO confirm against volume_utils.
        find_sr_by_uuid.assert_called_once_with(self.vmops._session,
                                                'FA15E-D15C-fake-uuid')
        self.assertEqual(0, forget_sr.call_count)
        self.assertEqual(0, hard_shutdown_vm.call_count)
    @mock.patch.object(vm_utils, 'lookup', side_effect=[None, None])
    @mock.patch.object(vm_utils, 'hard_shutdown_vm')
    @mock.patch.object(volume_utils, 'find_sr_by_uuid', return_value='sr_ref')
    @mock.patch.object(volume_utils, 'forget_sr')
    def test_no_vm_orphaned_volume_old_sr(self, forget_sr, find_sr_by_uuid,
                                          hard_shutdown_vm, lookup):
        """Orphaned volume whose SR still exists: the SR is forgotten."""
        self.vmops.destroy(self.instance, 'network_info',
                           {'block_device_mapping': [{'connection_info':
                                {'data': {'volume_id': 'fake-uuid'}}}]})
        find_sr_by_uuid.assert_called_once_with(self.vmops._session,
                                                'FA15E-D15C-fake-uuid')
        forget_sr.assert_called_once_with(self.vmops._session, 'sr_ref')
        self.assertEqual(0, hard_shutdown_vm.call_count)
    @mock.patch.object(vm_utils, 'lookup', side_effect=[None, None])
    @mock.patch.object(vm_utils, 'hard_shutdown_vm')
    @mock.patch.object(volume_utils, 'find_sr_by_uuid',
                       side_effect=[None, 'sr_ref'])
    @mock.patch.object(volume_utils, 'forget_sr')
    @mock.patch.object(uuid, 'uuid5', return_value='fake-uuid')
    def test_no_vm_orphaned_volume(self, uuid5, forget_sr,
            find_sr_by_uuid, hard_shutdown_vm, lookup):
        """When the legacy-named SR is absent, a second lookup is made using
        the uuid5(SR_NAMESPACE, host/port/iqn) name and that SR is forgotten.
        """
        fake_data = {'volume_id': 'fake-uuid',
                     'target_portal': 'host:port',
                     'target_iqn': 'iqn'}
        self.vmops.destroy(self.instance, 'network_info',
                           {'block_device_mapping': [{'connection_info':
                                {'data': fake_data}}]})
        call1 = mock.call(self.vmops._session, 'FA15E-D15C-fake-uuid')
        call2 = mock.call(self.vmops._session, 'fake-uuid')
        uuid5.assert_called_once_with(volume_utils.SR_NAMESPACE,
                                      'host/port/iqn')
        find_sr_by_uuid.assert_has_calls([call1, call2])
        forget_sr.assert_called_once_with(self.vmops._session, 'sr_ref')
        self.assertEqual(0, hard_shutdown_vm.call_count)
@mock.patch.object(vmops.VMOps, '_update_instance_progress')
@mock.patch.object(vmops.VMOps, '_get_vm_opaque_ref')
@mock.patch.object(vm_utils, 'get_sr_path')
@mock.patch.object(vmops.VMOps, '_detach_block_devices_from_orig_vm')
@mock.patch.object(vmops.VMOps, '_migrate_disk_resizing_down')
@mock.patch.object(vmops.VMOps, '_migrate_disk_resizing_up')
class MigrateDiskAndPowerOffTestCase(VMOpsTestBase):
    """migrate_disk_and_power_off picks the resize-up or resize-down path
    from the flavor deltas and rejects ephemeral shrink.

    NOTE: the class-level decorators inject the mocks into every test;
    each test only names the mocks it asserts on and takes the rest as
    ``*mocks`` (decorator order maps bottom-up onto the parameters).
    """
    def setUp(self):
        super(MigrateDiskAndPowerOffTestCase, self).setUp()
        self.context = context.RequestContext('user', 'project')
    def test_migrate_disk_and_power_off_works_down(self,
                migrate_up, migrate_down, *mocks):
        """Shrinking the root disk (2G -> 1G) takes the resizing-down path."""
        instance = objects.Instance(
            flavor=objects.Flavor(root_gb=2, ephemeral_gb=0),
            uuid=uuids.instance)
        flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=1,
                                             ephemeral_gb=0)
        self.vmops.migrate_disk_and_power_off(None, instance, None,
                                              flavor, None)
        self.assertFalse(migrate_up.called)
        self.assertTrue(migrate_down.called)
    def test_migrate_disk_and_power_off_works_up(self,
                migrate_up, migrate_down, *mocks):
        """Growing both disks takes the resizing-up path."""
        instance = objects.Instance(
            flavor=objects.Flavor(root_gb=1,
                                  ephemeral_gb=1),
            uuid=uuids.instance)
        flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=2,
                                             ephemeral_gb=2)
        self.vmops.migrate_disk_and_power_off(None, instance, None,
                                              flavor, None)
        self.assertFalse(migrate_down.called)
        self.assertTrue(migrate_up.called)
    def test_migrate_disk_and_power_off_resize_down_ephemeral_fails(self,
                migrate_up, migrate_down, *mocks):
        """Shrinking the ephemeral disk is not supported and raises."""
        instance = objects.Instance(flavor=objects.Flavor(ephemeral_gb=2))
        flavor = fake_flavor.fake_flavor_obj(self.context, ephemeral_gb=1)
        self.assertRaises(exception.ResizeError,
                          self.vmops.migrate_disk_and_power_off,
                          None, instance, None, flavor, None)
@mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
@mock.patch.object(vm_utils, 'migrate_vhd')
@mock.patch.object(vmops.VMOps, '_resize_ensure_vm_is_shutdown')
@mock.patch.object(vm_utils, 'get_all_vdi_uuids_for_vm')
@mock.patch.object(vmops.VMOps, '_update_instance_progress')
@mock.patch.object(vmops.VMOps, '_apply_orig_vm_name_label')
class MigrateDiskResizingUpTestCase(VMOpsTestBase):
def _fake_snapshot_attached_here(self, session, instance, vm_ref, label,
userdevice, post_snapshot_callback):
self.assertIsInstance(instance, dict)
if userdevice == '0':
self.assertEqual("vm_ref", vm_ref)
self.assertEqual("fake-snapshot", label)
yield ["leaf", "parent", "grandp"]
else:
leaf = userdevice + "-leaf"
parent = userdevice + "-parent"
yield [leaf, parent]
@mock.patch.object(volume_utils, 'is_booted_from_volume',
return_value=False)
def test_migrate_disk_resizing_up_works_no_ephemeral(self,
mock_is_booted_from_volume,
mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
mock_shutdown, mock_migrate_vhd, mock_get_vdi_for_vm):
context = "ctxt"
instance = {"name": "fake", "uuid": "uuid"}
dest = "dest"
vm_ref = "vm_ref"
sr_path = "sr_path"
mock_get_all_vdi_uuids.return_value = None
mock_get_vdi_for_vm.return_value = ({}, {"uuid": "root"})
with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
self._fake_snapshot_attached_here):
self.vmops._migrate_disk_resizing_up(context, instance, dest,
vm_ref, sr_path)
mock_get_all_vdi_uuids.assert_called_once_with(self.vmops._session,
vm_ref, min_userdevice=4)
mock_apply_orig.assert_called_once_with(instance, vm_ref)
mock_shutdown.assert_called_once_with(instance, vm_ref)
m_vhd_expected = [mock.call(self.vmops._session, instance, "parent",
dest, sr_path, 1),
mock.call(self.vmops._session, instance, "grandp",
dest, sr_path, 2),
mock.call(self.vmops._session, instance, "root",
dest, sr_path, 0)]
self.assertEqual(m_vhd_expected, mock_migrate_vhd.call_args_list)
prog_expected = [
mock.call(context, instance, 1, 5),
mock.call(context, instance, 2, 5),
mock.call(context, instance, 3, 5),
mock.call(context, instance, 4, 5)
# 5/5: step to be executed by finish migration.
]
self.assertEqual(prog_expected, mock_update_progress.call_args_list)
@mock.patch.object(volume_utils, 'is_booted_from_volume',
return_value=False)
def test_migrate_disk_resizing_up_works_with_two_ephemeral(self,
mock_is_booted_from_volume,
mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
mock_shutdown, mock_migrate_vhd, mock_get_vdi_for_vm):
context = "ctxt"
instance = {"name": "fake", "uuid": "uuid"}
dest = "dest"
vm_ref = "vm_ref"
sr_path = "sr_path"
mock_get_all_vdi_uuids.return_value = ["vdi-eph1", "vdi-eph2"]
mock_get_vdi_for_vm.side_effect = [({}, {"uuid": "root"}),
({}, {"uuid": "4-root"}),
({}, {"uuid": "5-root"})]
with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
self._fake_snapshot_attached_here):
self.vmops._migrate_disk_resizing_up(context, instance, dest,
vm_ref, sr_path)
mock_get_all_vdi_uuids.assert_called_once_with(self.vmops._session,
vm_ref, min_userdevice=4)
mock_apply_orig.assert_called_once_with(instance, vm_ref)
mock_shutdown.assert_called_once_with(instance, vm_ref)
m_vhd_expected = [mock.call(self.vmops._session, instance,
"parent", dest, sr_path, 1),
mock.call(self.vmops._session, instance,
"grandp", dest, sr_path, 2),
mock.call(self.vmops._session, instance,
"4-parent", dest, sr_path, 1, 1),
mock.call(self.vmops._session, instance,
"5-parent", dest, sr_path, 1, 2),
mock.call(self.vmops._session, instance,
"root", dest, sr_path, 0),
mock.call(self.vmops._session, instance,
"4-root", dest, sr_path, 0, 1),
mock.call(self.vmops._session, instance,
"5-root", dest, sr_path, 0, 2)]
self.assertEqual(m_vhd_expected, mock_migrate_vhd.call_args_list)
prog_expected = [
mock.call(context, instance, 1, 5),
mock.call(context, instance, 2, 5),
mock.call(context, instance, 3, 5),
mock.call(context, instance, 4, 5)
# 5/5: step to be executed by finish migration.
]
self.assertEqual(prog_expected, mock_update_progress.call_args_list)
@mock.patch.object(volume_utils, 'is_booted_from_volume',
return_value=True)
def test_migrate_disk_resizing_up_booted_from_volume(self,
mock_is_booted_from_volume,
mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
mock_shutdown, mock_migrate_vhd, mock_get_vdi_for_vm):
context = "ctxt"
instance = {"name": "fake", "uuid": "uuid"}
dest = "dest"
vm_ref = "vm_ref"
sr_path = "sr_path"
mock_get_all_vdi_uuids.return_value = ["vdi-eph1", "vdi-eph2"]
mock_get_vdi_for_vm.side_effect = [({}, {"uuid": "4-root"}),
({}, {"uuid": "5-root"})]
with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
self._fake_snapshot_attached_here):
self.vmops._migrate_disk_resizing_up(context, instance, dest,
vm_ref, sr_path)
mock_get_all_vdi_uuids.assert_called_once_with(self.vmops._session,
vm_ref, min_userdevice=4)
mock_apply_orig.assert_called_once_with(instance, vm_ref)
mock_shutdown.assert_called_once_with(instance, vm_ref)
m_vhd_expected = [mock.call(self.vmops._session, instance,
"4-parent", dest, sr_path, 1, 1),
mock.call(self.vmops._session, instance,
"5-parent", dest, sr_path, 1, 2),
mock.call(self.vmops._session, instance,
"4-root", dest, sr_path, 0, 1),
mock.call(self.vmops._session, instance,
"5-root", dest, sr_path, 0, 2)]
self.assertEqual(m_vhd_expected, mock_migrate_vhd.call_args_list)
prog_expected = [
mock.call(context, instance, 1, 5),
mock.call(context, instance, 2, 5),
mock.call(context, instance, 3, 5),
mock.call(context, instance, 4, 5)
# 5/5: step to be executed by finish migration.
]
self.assertEqual(prog_expected, mock_update_progress.call_args_list)
    @mock.patch.object(vmops.VMOps, '_restore_orig_vm_and_cleanup_orphan')
    @mock.patch.object(volume_utils, 'is_booted_from_volume',
                       return_value=False)
    def test_migrate_disk_resizing_up_rollback(self,
            mock_is_booted_from_volume,
            mock_restore,
            mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
            mock_shutdown, mock_migrate_vhd, mock_get_vdi_for_vm):
        """A VHD migration failure rolls the instance back and surfaces
        as InstanceFaultRollback -- even when the rollback itself also
        raises.

        NOTE(review): the trailing mock arguments are injected by
        class-level patch decorators outside this chunk.
        """
        context = "ctxt"
        instance = {"name": "fake", "uuid": "fake"}
        dest = "dest"
        vm_ref = "vm_ref"
        sr_path = "sr_path"
        # The first migrate_vhd call blows up; the rollback fails too, but
        # the original failure must still be wrapped and re-raised.
        mock_migrate_vhd.side_effect = test.TestingException
        mock_restore.side_effect = test.TestingException

        with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
                               self._fake_snapshot_attached_here):
            self.assertRaises(exception.InstanceFaultRollback,
                              self.vmops._migrate_disk_resizing_up,
                              context, instance, dest, vm_ref, sr_path)

        mock_apply_orig.assert_called_once_with(instance, vm_ref)
        mock_restore.assert_called_once_with(instance)
        # Only the first migrate_vhd call happened before the failure.
        mock_migrate_vhd.assert_called_once_with(self.vmops._session,
                                                 instance, "parent", dest,
                                                 sr_path, 1)
class CreateVMRecordTestCase(VMOpsTestBase):
    """Checks that _create_vm_record forwards the right arguments."""

    @mock.patch.object(vm_utils, 'determine_vm_mode')
    @mock.patch.object(vm_utils, 'get_vm_device_id')
    @mock.patch.object(vm_utils, 'create_vm')
    def test_create_vm_record_with_vm_device_id(self, mock_create_vm,
            mock_get_vm_device_id, mock_determine_vm_mode):
        # The device id advertised by the image must be passed through to
        # vm_utils.create_vm.
        device_id = "0002"
        image_meta = objects.ImageMeta.from_dict(
            {"properties": {"xenapi_device_id": device_id}})
        instance = objects.Instance(vm_mode="vm_mode", uuid=uuids.instance)
        session = "session"
        self.vmops._session = session
        mock_get_vm_device_id.return_value = device_id
        mock_determine_vm_mode.return_value = "vm_mode"

        self.vmops._create_vm_record(
            "context", instance, "dummy", "vhd", "kernel", "ram",
            image_meta, False)

        mock_get_vm_device_id.assert_called_with(session, image_meta)
        mock_create_vm.assert_called_with(session, instance, "dummy",
                                          "kernel", "ram", False, device_id)
class BootableTestCase(VMOpsTestBase):
    """Exercises the bootlock (blocked 'start' operation) handling."""

    def setUp(self):
        super(BootableTestCase, self).setUp()
        self.instance = {"name": "test", "uuid": "fake"}
        vm_rec, self.vm_ref = self.create_vm('test')
        # sanity check bootlock is initially disabled:
        self.assertEqual({}, vm_rec['blocked_operations'])

    def _get_blocked(self):
        # Fetch the current blocked_operations map for the test VM.
        record = self._session.call_xenapi("VM.get_record", self.vm_ref)
        return record['blocked_operations']

    def test_acquire_bootlock(self):
        self.vmops._acquire_bootlock(self.vm_ref)
        self.assertIn('start', self._get_blocked())

    def test_release_bootlock(self):
        self.vmops._acquire_bootlock(self.vm_ref)
        self.vmops._release_bootlock(self.vm_ref)
        self.assertNotIn('start', self._get_blocked())

    def test_set_bootable(self):
        self.vmops.set_bootable(self.instance, True)
        self.assertNotIn('start', self._get_blocked())

    def test_set_not_bootable(self):
        self.vmops.set_bootable(self.instance, False)
        self.assertIn('start', self._get_blocked())
@mock.patch.object(vm_utils, 'update_vdi_virtual_size', autospec=True)
class ResizeVdisTestCase(VMOpsTestBase):
    # Tests for VMOps._resize_up_vdis: when root/ephemeral VDIs are grown
    # to match the new flavor, and when resizing must be skipped.

    def test_dont_resize_root_volumes_osvol_false(self, mock_resize):
        # osvol=False means the root disk is local, so it IS resized.
        # NOTE(review): the test name says "dont_resize" but this case
        # expects a resize -- the name looks misleading.
        instance = fake_instance.fake_instance_obj(
            None, flavor=objects.Flavor(root_gb=20))
        vdis = {'root': {'osvol': False, 'ref': 'vdi_ref'}}

        self.vmops._resize_up_vdis(instance, vdis)

        self.assertTrue(mock_resize.called)

    def test_dont_resize_root_volumes_osvol_true(self, mock_resize):
        # A volume-backed root (osvol=True) must never be resized here.
        instance = fake_instance.fake_instance_obj(
            None, flavor=objects.Flavor(root_gb=20))
        vdis = {'root': {'osvol': True}}

        self.vmops._resize_up_vdis(instance, vdis)

        self.assertFalse(mock_resize.called)

    def test_dont_resize_root_volumes_no_osvol(self, mock_resize):
        # Without an 'osvol' flag (and no 'ref') nothing is resized.
        instance = fake_instance.fake_instance_obj(
            None, flavor=objects.Flavor(root_gb=20))
        vdis = {'root': {}}

        self.vmops._resize_up_vdis(instance, vdis)

        self.assertFalse(mock_resize.called)

    @mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
    def test_ensure_ephemeral_resize_with_root_volume(self, mock_sizes,
                                                      mock_resize):
        # Even with a volume-backed root, existing ephemeral disks are
        # resized in place and none are generated from scratch.
        mock_sizes.return_value = [2000, 1000]
        instance = fake_instance.fake_instance_obj(
            None, flavor=objects.Flavor(root_gb=20, ephemeral_gb=20))
        ephemerals = {"4": {"ref": 4}, "5": {"ref": 5}}
        vdis = {'root': {'osvol': True, 'ref': 'vdi_ref'},
                'ephemerals': ephemerals}
        with mock.patch.object(vm_utils, 'generate_single_ephemeral',
                               autospec=True) as g:
            self.vmops._resize_up_vdis(instance, vdis)

            self.assertEqual([mock.call(self.vmops._session, instance, 4,
                                        2000),
                              mock.call(self.vmops._session, instance, 5,
                                        1000)],
                             mock_resize.call_args_list)
            self.assertFalse(g.called)

    def test_resize_up_vdis_root(self, mock_resize):
        instance = objects.Instance(flavor=objects.Flavor(root_gb=20,
                                                          ephemeral_gb=0))
        self.vmops._resize_up_vdis(instance, {"root": {"ref": "vdi_ref"}})
        # The root VDI is grown to the flavor's root_gb.
        mock_resize.assert_called_once_with(self.vmops._session, instance,
                                            "vdi_ref", 20)

    def test_resize_up_vdis_zero_disks(self, mock_resize):
        # root_gb=0 means there is nothing to grow.
        instance = objects.Instance(flavor=objects.Flavor(root_gb=0,
                                                          ephemeral_gb=0))
        self.vmops._resize_up_vdis(instance, {"root": {}})
        self.assertFalse(mock_resize.called)

    def test_resize_up_vdis_no_vdis_like_initial_spawn(self, mock_resize):
        # No VDIs at all (as on initial spawn) -> nothing to resize.
        instance = objects.Instance(flavor=objects.Flavor(root_gb=0,
                                                          ephemeral_gb=3000))
        vdis = {}

        self.vmops._resize_up_vdis(instance, vdis)

        self.assertFalse(mock_resize.called)

    @mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
    def test_resize_up_vdis_ephemeral(self, mock_sizes, mock_resize):
        # Each attached ephemeral VDI is resized to its computed size.
        mock_sizes.return_value = [2000, 1000]
        instance = objects.Instance(flavor=objects.Flavor(root_gb=0,
                                                          ephemeral_gb=3000))
        ephemerals = {"4": {"ref": 4}, "5": {"ref": 5}}
        vdis = {"ephemerals": ephemerals}

        self.vmops._resize_up_vdis(instance, vdis)

        mock_sizes.assert_called_once_with(3000)
        expected = [mock.call(self.vmops._session, instance, 4, 2000),
                    mock.call(self.vmops._session, instance, 5, 1000)]
        self.assertEqual(expected, mock_resize.call_args_list)

    @mock.patch.object(vm_utils, 'generate_single_ephemeral')
    @mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
    def test_resize_up_vdis_ephemeral_with_generate(self, mock_sizes,
                                                    mock_generate,
                                                    mock_resize):
        # When the new flavor needs more ephemeral disks than exist, the
        # existing one is resized and the missing one (userdevice 5) is
        # generated with the remaining size.
        mock_sizes.return_value = [2000, 1000]
        instance = objects.Instance(uuid=uuids.instance,
                                    flavor=objects.Flavor(root_gb=0,
                                                          ephemeral_gb=3000))
        ephemerals = {"4": {"ref": 4}}
        vdis = {"ephemerals": ephemerals}

        self.vmops._resize_up_vdis(instance, vdis)

        mock_sizes.assert_called_once_with(3000)
        mock_resize.assert_called_once_with(self.vmops._session, instance,
                                            4, 2000)
        mock_generate.assert_called_once_with(self.vmops._session, instance,
                                              None, 5, 1000)
@mock.patch.object(vm_utils, 'remove_old_snapshots')
class CleanupFailedSnapshotTestCase(VMOpsTestBase):
    """post_interrupted_snapshot_cleanup delegates to remove_old_snapshots."""

    def test_post_interrupted_snapshot_cleanup(self, mock_remove):
        self.vmops._get_vm_opaque_ref = mock.Mock(return_value="vm_ref")

        self.vmops.post_interrupted_snapshot_cleanup("context", "instance")

        mock_remove.assert_called_once_with(
            self.vmops._session, "instance", "vm_ref")
class XenstoreCallsTestCase(VMOpsTestBase):
    """Test cases for Read/Write/Delete/Update xenstore calls
    from vmops.
    """

    @mock.patch.object(vmops.VMOps, '_get_dom_id')
    @mock.patch.object(host_xenstore, 'read_record')
    def test_read_from_xenstore(self, mock_read_record, mock_dom_id):
        # The raw value returned by the host plugin is passed back
        # verbatim, and the dom id is resolved from the supplied vm_ref.
        mock_read_record.return_value = "fake_xapi_return"
        mock_dom_id.return_value = "fake_dom_id"
        fake_instance = {"name": "fake_instance"}
        path = "attr/PVAddons/MajorVersion"
        self.assertEqual("fake_xapi_return",
                         self.vmops._read_from_xenstore(fake_instance, path,
                                                        vm_ref="vm_ref"))
        mock_dom_id.assert_called_once_with(fake_instance, "vm_ref")

    @mock.patch.object(vmops.VMOps, '_get_dom_id')
    @mock.patch.object(host_xenstore, 'read_record')
    def test_read_from_xenstore_ignore_missing_path(self, mock_read_record,
                                                    mock_dom_id):
        # By default, missing xenstore paths are ignored:
        # ignore_missing_path=True is forwarded to the plugin.
        mock_read_record.return_value = "fake_xapi_return"
        mock_dom_id.return_value = "fake_dom_id"
        fake_instance = {"name": "fake_instance"}
        path = "attr/PVAddons/MajorVersion"
        self.vmops._read_from_xenstore(fake_instance, path, vm_ref="vm_ref")
        mock_read_record.assert_called_once_with(
            self._session, "fake_dom_id", path, ignore_missing_path=True)

    @mock.patch.object(vmops.VMOps, '_get_dom_id')
    @mock.patch.object(host_xenstore, 'read_record')
    def test_read_from_xenstore_missing_path(self, mock_read_record,
                                             mock_dom_id):
        # An explicit ignore_missing_path=False must be passed through
        # unchanged.
        mock_read_record.return_value = "fake_xapi_return"
        mock_dom_id.return_value = "fake_dom_id"
        fake_instance = {"name": "fake_instance"}
        path = "attr/PVAddons/MajorVersion"
        self.vmops._read_from_xenstore(fake_instance, path, vm_ref="vm_ref",
                                       ignore_missing_path=False)
        mock_read_record.assert_called_once_with(self._session, "fake_dom_id",
                                                 path,
                                                 ignore_missing_path=False)
class LiveMigrateTestCase(VMOpsTestBase):
    # Tests for the live-migration entry points on VMOps.

    @mock.patch.object(vmops.VMOps, '_get_network_ref')
    @mock.patch.object(vmops.VMOps, '_ensure_host_in_aggregate')
    def _test_check_can_live_migrate_destination_shared_storage(
            self,
            shared,
            mock_ensure_host,
            mock_net_ref):
        # Helper: shared SR storage -> no block migration needed;
        # non-shared storage -> block migration required.
        fake_instance = {"name": "fake_instance", "host": "fake_host"}
        block_migration = None
        disk_over_commit = False
        ctxt = 'ctxt'
        mock_net_ref.return_value = 'fake_net_ref'
        with mock.patch.object(self._session, 'get_rec') as fake_sr_rec:
            fake_sr_rec.return_value = {'shared': shared}
            migrate_data_ret = self.vmops.check_can_live_migrate_destination(
                ctxt, fake_instance, block_migration, disk_over_commit)

        if shared:
            self.assertFalse(migrate_data_ret.block_migration)
        else:
            self.assertTrue(migrate_data_ret.block_migration)
        # The '' key maps the default destination network.
        self.assertEqual({'': 'fake_net_ref'},
                         migrate_data_ret.vif_uuid_map)

    def test_check_can_live_migrate_destination_shared_storage(self):
        self._test_check_can_live_migrate_destination_shared_storage(True)

    def test_check_can_live_migrate_destination_shared_storage_false(self):
        self._test_check_can_live_migrate_destination_shared_storage(False)

    @mock.patch.object(vmops.VMOps, '_get_network_ref')
    @mock.patch.object(vmops.VMOps, '_ensure_host_in_aggregate',
                       side_effect=exception.MigrationPreCheckError(reason=""))
    def test_check_can_live_migrate_destination_block_migration(
            self,
            mock_ensure_host,
            mock_net_ref):
        # If the destination host is not in the instance's aggregate, the
        # pre-check falls back to block migration and fills in the
        # destination SR ref and migrate_send_data.
        fake_instance = {"name": "fake_instance", "host": "fake_host"}
        block_migration = None
        disk_over_commit = False
        ctxt = 'ctxt'
        mock_net_ref.return_value = 'fake_net_ref'
        migrate_data_ret = self.vmops.check_can_live_migrate_destination(
            ctxt, fake_instance, block_migration, disk_over_commit)

        self.assertTrue(migrate_data_ret.block_migration)
        self.assertEqual(vm_utils.safe_find_sr(self._session),
                         migrate_data_ret.destination_sr_ref)
        # 'fake_migrate_data' is presumably produced by the fake session
        # in the test base -- see VMOpsTestBase.
        self.assertEqual({'value': 'fake_migrate_data'},
                         migrate_data_ret.migrate_send_data)
        self.assertEqual({'': 'fake_net_ref'},
                         migrate_data_ret.vif_uuid_map)

    @mock.patch.object(vmops.objects.AggregateList, 'get_by_host')
    def test_get_host_uuid_from_aggregate_no_aggr(self, mock_get_by_host):
        # No aggregates for the host at all -> MigrationPreCheckError.
        mock_get_by_host.return_value = objects.AggregateList(objects=[])
        context = "ctx"
        hostname = "other_host"
        self.assertRaises(exception.MigrationPreCheckError,
                          self.vmops._get_host_uuid_from_aggregate,
                          context, hostname)

    @mock.patch.object(vmops.objects.AggregateList, 'get_by_host')
    def test_get_host_uuid_from_aggregate_bad_aggr(self, mock_get_by_host):
        # An aggregate exists but lacks the expected metadata for the
        # requested host -> MigrationPreCheckError.
        context = "ctx"
        hostname = "other_host"
        fake_aggregate_obj = objects.Aggregate(hosts=['fake'],
                                               metadata={'this': 'that'})
        fake_aggr_list = objects.AggregateList(objects=[fake_aggregate_obj])
        mock_get_by_host.return_value = fake_aggr_list

        self.assertRaises(exception.MigrationPreCheckError,
                          self.vmops._get_host_uuid_from_aggregate,
                          context, hostname)

    @mock.patch.object(vmops.VMOps, 'create_interim_networks')
    @mock.patch.object(vmops.VMOps, 'connect_block_device_volumes')
    def test_pre_live_migration(self, mock_connect, mock_create):
        # pre_live_migration records the SR and interim-network mappings
        # created on the destination into the migrate_data it returns.
        migrate_data = objects.XenapiLiveMigrateData()
        migrate_data.block_migration = True
        sr_uuid_map = {"sr_uuid": "sr_ref"}
        vif_uuid_map = {"neutron_vif_uuid": "dest_network_ref"}
        mock_connect.return_value = {"sr_uuid": "sr_ref"}
        mock_create.return_value = {"neutron_vif_uuid": "dest_network_ref"}
        result = self.vmops.pre_live_migration(
            None, None, "bdi", "fake_network_info", None, migrate_data)

        self.assertTrue(result.block_migration)
        self.assertEqual(result.sr_uuid_map, sr_uuid_map)
        self.assertEqual(result.vif_uuid_map, vif_uuid_map)
        mock_connect.assert_called_once_with("bdi")
        mock_create.assert_called_once_with("fake_network_info")

    @mock.patch.object(vmops.VMOps, '_delete_networks_and_bridges')
    def test_post_live_migration_at_source(self, mock_delete):
        # After a successful migration the source side tears down its
        # networks and bridges.
        self.vmops.post_live_migration_at_source('fake_context',
                                                 'fake_instance',
                                                 'fake_network_info')
        mock_delete.assert_called_once_with('fake_instance',
                                            'fake_network_info')
class LiveMigrateFakeVersionTestCase(VMOpsTestBase):
    # Tests for writing a fake PV driver version into xenstore before
    # live migration (see _ensure_pv_driver_info_for_live_migration).

    @mock.patch.object(vmops.VMOps, '_pv_device_reported')
    @mock.patch.object(vmops.VMOps, '_pv_driver_version_reported')
    @mock.patch.object(vmops.VMOps, '_write_fake_pv_version')
    def test_ensure_pv_driver_info_for_live_migration(
            self,
            mock_write_fake_pv_version,
            mock_pv_driver_version_reported,
            mock_pv_device_reported):
        # PV devices present but no driver version reported -> a fake
        # version gets written.
        mock_pv_device_reported.return_value = True
        mock_pv_driver_version_reported.return_value = False
        fake_instance = {"name": "fake_instance"}
        self.vmops._ensure_pv_driver_info_for_live_migration(fake_instance,
                                                             "vm_rec")

        mock_write_fake_pv_version.assert_called_once_with(fake_instance,
                                                           "vm_rec")

    @mock.patch.object(vmops.VMOps, '_read_from_xenstore')
    def test_pv_driver_version_reported_None(self, fake_read_from_xenstore):
        # xenstore yields the literal string '"None"' when no version has
        # been reported; that must count as "not reported".
        fake_read_from_xenstore.return_value = '"None"'
        fake_instance = {"name": "fake_instance"}
        self.assertFalse(self.vmops._pv_driver_version_reported(fake_instance,
                                                                "vm_ref"))

    @mock.patch.object(vmops.VMOps, '_read_from_xenstore')
    def test_pv_driver_version_reported(self, fake_read_from_xenstore):
        # A real version string counts as reported.
        fake_read_from_xenstore.return_value = '6.2.0'
        fake_instance = {"name": "fake_instance"}
        self.assertTrue(self.vmops._pv_driver_version_reported(fake_instance,
                                                               "vm_ref"))

    @mock.patch.object(vmops.VMOps, '_read_from_xenstore')
    def test_pv_device_reported(self, fake_read_from_xenstore):
        # A VIF whose xenstore entry is a positive device id counts as a
        # reported PV device.
        with mock.patch.object(self._session.VM, 'get_record') as fake_vm_rec:
            fake_vm_rec.return_value = {'VIFs': 'fake-vif-object'}
            with mock.patch.object(self._session, 'call_xenapi') as fake_call:
                fake_call.return_value = {'device': '0'}
                fake_read_from_xenstore.return_value = '4'
                fake_instance = {"name": "fake_instance"}
                self.assertTrue(self.vmops._pv_device_reported(fake_instance,
                                                               "vm_ref"))

    @mock.patch.object(vmops.VMOps, '_read_from_xenstore')
    def test_pv_device_not_reported(self, fake_read_from_xenstore):
        # '0' in xenstore means no PV device has been reported.
        with mock.patch.object(self._session.VM, 'get_record') as fake_vm_rec:
            fake_vm_rec.return_value = {'VIFs': 'fake-vif-object'}
            with mock.patch.object(self._session, 'call_xenapi') as fake_call:
                fake_call.return_value = {'device': '0'}
                fake_read_from_xenstore.return_value = '0'
                fake_instance = {"name": "fake_instance"}
                self.assertFalse(self.vmops._pv_device_reported(fake_instance,
                                                                "vm_ref"))

    @mock.patch.object(vmops.VMOps, '_read_from_xenstore')
    def test_pv_device_None_reported(self, fake_read_from_xenstore):
        # The literal '"None"' likewise means nothing was reported.
        with mock.patch.object(self._session.VM, 'get_record') as fake_vm_rec:
            fake_vm_rec.return_value = {'VIFs': 'fake-vif-object'}
            with mock.patch.object(self._session, 'call_xenapi') as fake_call:
                fake_call.return_value = {'device': '0'}
                fake_read_from_xenstore.return_value = '"None"'
                fake_instance = {"name": "fake_instance"}
                self.assertFalse(self.vmops._pv_device_reported(fake_instance,
                                                                "vm_ref"))

    @mock.patch.object(vmops.VMOps, '_write_to_xenstore')
    def test_write_fake_pv_version(self, fake_write_to_xenstore):
        # The host's product_version is the value written into the guest;
        # the method itself returns None.
        fake_write_to_xenstore.return_value = 'fake_return'
        fake_instance = {"name": "fake_instance"}
        with mock.patch.object(self._session, 'product_version') as version:
            version.return_value = ('6', '2', '0')
            self.assertIsNone(self.vmops._write_fake_pv_version(fake_instance,
                                                                "vm_ref"))
class LiveMigrateHelperTestCase(VMOpsTestBase):
    # Tests for the live-migration helpers: volume SR plumbing, the
    # XenAPI migrate command invocation, and VIF/network map generation.

    def test_connect_block_device_volumes_none(self):
        # No block device info at all -> empty SR map.
        self.assertEqual({}, self.vmops.connect_block_device_volumes(None))

    @mock.patch.object(volumeops.VolumeOps, "connect_volume")
    def test_connect_block_device_volumes_calls_connect(self, mock_connect):
        with mock.patch.object(self.vmops._session,
                               "call_xenapi") as mock_session:
            mock_connect.return_value = ("sr_uuid", None)
            mock_session.return_value = "sr_ref"
            bdm = {"connection_info": "c_info"}
            bdi = {"block_device_mapping": [bdm]}
            result = self.vmops.connect_block_device_volumes(bdi)

            # Each connected volume's SR uuid maps to its resolved SR ref.
            self.assertEqual({'sr_uuid': 'sr_ref'}, result)

            mock_connect.assert_called_once_with("c_info")
            mock_session.assert_called_once_with("SR.get_by_uuid",
                                                 "sr_uuid")

    @mock.patch.object(volumeops.VolumeOps, "connect_volume")
    @mock.patch.object(volume_utils, 'forget_sr')
    def test_connect_block_device_volumes_calls_forget_sr(self, mock_forget,
                                                          mock_connect):
        # If connecting a later volume fails, the SRs connected so far
        # are forgotten again before the error propagates.
        bdms = [{'connection_info': 'info1'},
                {'connection_info': 'info2'}]

        def fake_connect(connection_info):
            # Volumes must be connected in bdm order...
            expected = bdms[mock_connect.call_count - 1]['connection_info']
            self.assertEqual(expected, connection_info)

            # ...and the second connection fails.
            if mock_connect.call_count == 2:
                raise exception.VolumeDriverNotFound(driver_type='123')

            return ('sr_uuid_1', None)

        def fake_call_xenapi(method, uuid):
            self.assertEqual('sr_uuid_1', uuid)
            return 'sr_ref_1'

        mock_connect.side_effect = fake_connect
        with mock.patch.object(self.vmops._session, "call_xenapi",
                               side_effect=fake_call_xenapi):
            self.assertRaises(exception.VolumeDriverNotFound,
                              self.vmops.connect_block_device_volumes,
                              {'block_device_mapping': bdms})
            mock_forget.assert_called_once_with(self.vmops._session,
                                                'sr_ref_1')

    def _call_live_migrate_command_with_migrate_send_data(self, migrate_data):
        # Helper driving _call_live_migrate_command and asserting on the
        # VDI/VIF maps handed to the final XenAPI call.
        command_name = 'test_command'
        vm_ref = "vm_ref"

        def side_effect(method, *args):
            if method == "SR.get_by_uuid":
                return "sr_ref_new"
            # For every other call, check the arguments stay
            # xmlrpc-serialisable (raises if not).
            xmlrpclib.dumps(args, method, allow_none=1)

        with mock.patch.object(self.vmops,
                               "_generate_vdi_map") as mock_gen_vdi_map, \
                mock.patch.object(self.vmops._session,
                                  'call_xenapi') as mock_call_xenapi, \
                mock.patch.object(self.vmops,
                                  "_generate_vif_network_map") as mock_vif_map:
            mock_call_xenapi.side_effect = side_effect
            mock_gen_vdi_map.side_effect = [
                {"vdi": "sr_ref"}, {"vdi": "sr_ref_2"}]
            mock_vif_map.return_value = {"vif_ref1": "dest_net_ref"}

            self.vmops._call_live_migrate_command(command_name,
                                                  vm_ref, migrate_data)

            # The VIF map is only generated when vif_uuid_map was set.
            expect_vif_map = {}
            if 'vif_uuid_map' in migrate_data:
                expect_vif_map.update({"vif_ref1": "dest_net_ref"})
            # With an sr_uuid_map the second generated VDI map is used.
            expected_vdi_map = {'vdi': 'sr_ref'}
            if 'sr_uuid_map' in migrate_data:
                expected_vdi_map = {'vdi': 'sr_ref_2'}
            self.assertEqual(mock_call_xenapi.call_args_list[-1],
                             mock.call(command_name, vm_ref,
                                       migrate_data.migrate_send_data, True,
                                       expected_vdi_map, expect_vif_map, {}))

            self.assertEqual(mock_gen_vdi_map.call_args_list[0],
                             mock.call(migrate_data.destination_sr_ref,
                                       vm_ref))

            if 'sr_uuid_map' in migrate_data:
                self.assertEqual(mock_gen_vdi_map.call_args_list[1],
                                 mock.call(
                                     migrate_data.sr_uuid_map["sr_uuid2"],
                                     vm_ref, "sr_ref_new"))

    def test_call_live_migrate_command_with_full_data(self):
        migrate_data = objects.XenapiLiveMigrateData()
        migrate_data.migrate_send_data = {"foo": "bar"}
        migrate_data.destination_sr_ref = "sr_ref"
        migrate_data.sr_uuid_map = {"sr_uuid2": "sr_ref_3"}
        migrate_data.vif_uuid_map = {"vif_id": "dest_net_ref"}
        self._call_live_migrate_command_with_migrate_send_data(migrate_data)

    def test_call_live_migrate_command_with_no_sr_uuid_map(self):
        migrate_data = objects.XenapiLiveMigrateData()
        migrate_data.migrate_send_data = {"foo": "baz"}
        migrate_data.destination_sr_ref = "sr_ref"
        self._call_live_migrate_command_with_migrate_send_data(migrate_data)

    def test_call_live_migrate_command_with_no_migrate_send_data(self):
        # migrate_send_data is mandatory for the live-migrate command.
        migrate_data = objects.XenapiLiveMigrateData()
        self.assertRaises(exception.InvalidParameterValue,
                          self._call_live_migrate_command_with_migrate_send_data,
                          migrate_data)

    def test_generate_vif_network_map(self):
        # neutron-port-id entries in each VIF's other_config are used to
        # look up the destination network refs.
        with mock.patch.object(self._session.VIF,
                               'get_other_config') as mock_other_config, \
                mock.patch.object(self._session.VM,
                                  'get_VIFs') as mock_get_vif:
            mock_other_config.side_effect = [{'neutron-port-id': 'vif_id_a'},
                                             {'neutron-port-id': 'vif_id_b'}]
            mock_get_vif.return_value = ['vif_ref1', 'vif_ref2']
            vif_uuid_map = {'vif_id_b': 'dest_net_ref2',
                            'vif_id_a': 'dest_net_ref1'}
            vif_map = self.vmops._generate_vif_network_map('vm_ref',
                                                           vif_uuid_map)
            expected = {'vif_ref1': 'dest_net_ref1',
                        'vif_ref2': 'dest_net_ref2'}
            self.assertEqual(vif_map, expected)

    def test_generate_vif_network_map_default_net(self):
        # Without a recognised neutron port id, the '' entry supplies the
        # default destination network.
        with mock.patch.object(self._session.VIF,
                               'get_other_config') as mock_other_config, \
                mock.patch.object(self._session.VM,
                                  'get_VIFs') as mock_get_vif:
            mock_other_config.side_effect = [{'nicira-iface-id': 'vif_id_a'},
                                             {'nicira-iface-id': 'vif_id_b'}]
            mock_get_vif.return_value = ['vif_ref1']
            vif_uuid_map = {'': 'default_net_ref'}
            vif_map = self.vmops._generate_vif_network_map('vm_ref',
                                                           vif_uuid_map)
            expected = {'vif_ref1': 'default_net_ref'}
            self.assertEqual(vif_map, expected)

    def test_generate_vif_network_map_exception(self):
        # A VIF whose port id has no mapping raises MigrationError.
        with mock.patch.object(self._session.VIF,
                               'get_other_config') as mock_other_config, \
                mock.patch.object(self._session.VM,
                                  'get_VIFs') as mock_get_vif:
            mock_other_config.side_effect = [{'neutron-port-id': 'vif_id_a'},
                                             {'neutron-port-id': 'vif_id_b'}]
            mock_get_vif.return_value = ['vif_ref1', 'vif_ref2']
            vif_uuid_map = {'vif_id_c': 'dest_net_ref2',
                            'vif_id_d': 'dest_net_ref1'}
            self.assertRaises(exception.MigrationError,
                              self.vmops._generate_vif_network_map,
                              'vm_ref', vif_uuid_map)

    def test_generate_vif_network_map_exception_no_iface(self):
        # other_config without any recognised iface-id key also fails.
        with mock.patch.object(self._session.VIF,
                               'get_other_config') as mock_other_config, \
                mock.patch.object(self._session.VM,
                                  'get_VIFs') as mock_get_vif:
            mock_other_config.return_value = {}
            mock_get_vif.return_value = ['vif_ref1']
            vif_uuid_map = {}
            self.assertRaises(exception.MigrationError,
                              self.vmops._generate_vif_network_map,
                              'vm_ref', vif_uuid_map)

    def test_delete_networks_and_bridges(self):
        # Deletion is delegated to the vif driver for each vif.
        self.vmops.vif_driver = mock.Mock()
        network_info = ['fake_vif']
        self.vmops._delete_networks_and_bridges('fake_instance', network_info)
        self.vmops.vif_driver.delete_network_and_bridge.\
            assert_called_once_with('fake_instance', 'fake_vif')

    def test_create_interim_networks(self):
        # Each VIF id is mapped to the interim network ref created by the
        # vif driver.
        class FakeVifDriver(object):
            def create_vif_interim_network(self, vif):
                if vif['id'] == "vif_1":
                    return "network_ref_1"
                if vif['id'] == "vif_2":
                    return "network_ref_2"

        network_info = [{'id': "vif_1"}, {'id': 'vif_2'}]
        self.vmops.vif_driver = FakeVifDriver()
        vif_map = self.vmops.create_interim_networks(network_info)
        self.assertEqual(vif_map, {'vif_1': 'network_ref_1',
                                   'vif_2': 'network_ref_2'})
class RollbackLiveMigrateDestinationTestCase(VMOpsTestBase):
    """Rollback on the destination forgets SRs and tears down networks."""

    @staticmethod
    def _fake_block_device_info():
        # One volume-backed mapping, enough for the SR lookup path.
        connection_info = {'data': {'volume_id': 'fake-uuid',
                                    'target_iqn': 'fake-iqn',
                                    'target_portal': 'fake-portal'}}
        return {'block_device_mapping':
                [{'connection_info': connection_info}]}

    @mock.patch.object(vmops.VMOps, '_delete_networks_and_bridges')
    @mock.patch.object(volume_utils, 'find_sr_by_uuid', return_value='sr_ref')
    @mock.patch.object(volume_utils, 'forget_sr')
    def test_rollback_dest_calls_sr_forget(self, forget_sr, sr_ref,
                                           delete_networks_bridges):
        self.vmops.rollback_live_migration_at_destination(
            'instance', [{'id': 'vif1'}], self._fake_block_device_info())

        forget_sr.assert_called_once_with(self.vmops._session, 'sr_ref')
        delete_networks_bridges.assert_called_once_with(
            'instance', [{'id': 'vif1'}])

    @mock.patch.object(vmops.VMOps, '_delete_networks_and_bridges')
    @mock.patch.object(volume_utils, 'forget_sr')
    @mock.patch.object(volume_utils, 'find_sr_by_uuid',
                       side_effect=test.TestingException)
    def test_rollback_dest_handles_exception(self, find_sr_ref, forget_sr,
                                             delete_networks_bridges):
        # An SR lookup failure must not abort the rollback; forgetting
        # the SR is simply skipped while network cleanup still happens.
        self.vmops.rollback_live_migration_at_destination(
            'instance', [{'id': 'vif1'}], self._fake_block_device_info())

        self.assertFalse(forget_sr.called)
        delete_networks_bridges.assert_called_once_with(
            'instance', [{'id': 'vif1'}])
@mock.patch.object(vmops.VMOps, '_resize_ensure_vm_is_shutdown')
@mock.patch.object(vmops.VMOps, '_apply_orig_vm_name_label')
@mock.patch.object(vmops.VMOps, '_update_instance_progress')
@mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
@mock.patch.object(vm_utils, 'resize_disk')
@mock.patch.object(vm_utils, 'migrate_vhd')
@mock.patch.object(vm_utils, 'destroy_vdi')
class MigrateDiskResizingDownTestCase(VMOpsTestBase):
    # _migrate_disk_resizing_down resizes the root disk into a new VDI,
    # copies it to the destination and destroys the temporary copy.

    def test_migrate_disk_resizing_down_works_no_ephemeral(
            self,
            mock_destroy_vdi,
            mock_migrate_vhd,
            mock_resize_disk,
            mock_get_vdi_for_vm_safely,
            mock_update_instance_progress,
            mock_apply_orig_vm_name_label,
            mock_resize_ensure_vm_is_shutdown):
        context = "ctx"
        instance = {"name": "fake", "uuid": "uuid"}
        dest = "dest"
        vm_ref = "vm_ref"
        sr_path = "sr_path"
        instance_type = dict(root_gb=1)
        old_vdi_ref = "old_ref"
        new_vdi_ref = "new_ref"
        new_vdi_uuid = "new_uuid"

        mock_get_vdi_for_vm_safely.return_value = (old_vdi_ref, None)
        mock_resize_disk.return_value = (new_vdi_ref, new_vdi_uuid)

        self.vmops._migrate_disk_resizing_down(context, instance, dest,
                                               instance_type, vm_ref,
                                               sr_path)

        mock_get_vdi_for_vm_safely.assert_called_once_with(
            self.vmops._session,
            vm_ref)
        mock_resize_ensure_vm_is_shutdown.assert_called_once_with(
            instance, vm_ref)
        mock_apply_orig_vm_name_label.assert_called_once_with(
            instance, vm_ref)
        # The old VDI is shrunk into a brand-new VDI...
        mock_resize_disk.assert_called_once_with(
            self.vmops._session,
            instance,
            old_vdi_ref,
            instance_type)
        # ...that copy is sent to the destination...
        mock_migrate_vhd.assert_called_once_with(
            self.vmops._session,
            instance,
            new_vdi_uuid,
            dest,
            sr_path, 0)
        # ...and the temporary VDI is removed afterwards.
        mock_destroy_vdi.assert_called_once_with(
            self.vmops._session,
            new_vdi_ref)

        prog_expected = [
            mock.call(context, instance, 1, 5),
            mock.call(context, instance, 2, 5),
            mock.call(context, instance, 3, 5),
            mock.call(context, instance, 4, 5)
            # 5/5: step to be executed by finish migration.
            ]
        self.assertEqual(prog_expected,
                         mock_update_instance_progress.call_args_list)
class GetVdisForInstanceTestCase(VMOpsTestBase):
    """Tests get_vdis_for_instance utility method."""

    def setUp(self):
        super(GetVdisForInstanceTestCase, self).setUp()
        self.context = context.get_admin_context()
        self.context.auth_token = 'auth_token'
        self.session = mock.Mock()
        self.vmops._session = self.session
        self.instance = fake_instance.fake_instance_obj(self.context)
        self.name_label = 'name'
        self.image = 'fake_image_id'

    @mock.patch.object(volumeops.VolumeOps, "connect_volume",
                       return_value=("sr", "vdi_uuid"))
    def test_vdis_for_instance_bdi_password_scrubbed(self, get_uuid_mock):
        # setup fake data
        data = {'name_label': self.name_label,
                'sr_uuid': 'fake',
                'auth_password': 'scrubme'}
        bdm = [{'mount_device': '/dev/vda',
                'connection_info': {'data': data}}]
        bdi = {'root_device_name': 'vda',
               'block_device_mapping': bdm}

        # Tests that the parameters to the to_xml method are sanitized for
        # passwords when logged.
        def fake_debug(*args, **kwargs):
            # Flag any debug line mentioning auth_password and assert the
            # plaintext value never appears in it.  The 'matched' function
            # attribute records that the interesting line was seen at all.
            if 'auth_password' in args[0]:
                self.assertNotIn('scrubme', args[0])
                fake_debug.matched = True

        fake_debug.matched = False

        with mock.patch.object(vmops.LOG, 'debug',
                               side_effect=fake_debug) as debug_mock:
            vdis = self.vmops._get_vdis_for_instance(self.context,
                    self.instance, self.name_label, self.image,
                    image_type=4, block_device_info=bdi)
            self.assertEqual(1, len(vdis))
            get_uuid_mock.assert_called_once_with({"data": data})
            # we don't care what the log message is, we just want to make sure
            # our stub method is called which asserts the password is scrubbed
            self.assertTrue(debug_mock.called)
            self.assertTrue(fake_debug.matched)
class AttachInterfaceTestCase(VMOpsTestBase):
    """Test VIF hot plug/unplug"""

    def setUp(self):
        super(AttachInterfaceTestCase, self).setUp()
        self.vmops.vif_driver = mock.Mock()
        self.fake_vif = {'id': '12345'}
        self.fake_instance = mock.Mock()
        self.fake_instance.uuid = '6478'

    @mock.patch.object(vmops.VMOps, '_get_vm_opaque_ref')
    def test_attach_interface(self, mock_get_vm_opaque_ref):
        mock_get_vm_opaque_ref.return_value = 'fake_vm_ref'
        with mock.patch.object(self._session.VM, 'get_allowed_VIF_devices')\
                as fake_devices:
            fake_devices.return_value = [2, 3, 4]
            self.vmops.attach_interface(self.fake_instance, self.fake_vif)
            fake_devices.assert_called_once_with('fake_vm_ref')
            mock_get_vm_opaque_ref.assert_called_once_with(self.fake_instance)
            # The first allowed device slot (2) is used for the new VIF.
            self.vmops.vif_driver.plug.assert_called_once_with(
                self.fake_instance, self.fake_vif, vm_ref='fake_vm_ref',
                device=2)

    @mock.patch.object(vmops.VMOps, '_get_vm_opaque_ref')
    def test_attach_interface_no_devices(self, mock_get_vm_opaque_ref):
        # No free VIF slots on the VM -> InterfaceAttachFailed.
        mock_get_vm_opaque_ref.return_value = 'fake_vm_ref'
        with mock.patch.object(self._session.VM, 'get_allowed_VIF_devices')\
                as fake_devices:
            fake_devices.return_value = []
            self.assertRaises(exception.InterfaceAttachFailed,
                              self.vmops.attach_interface,
                              self.fake_instance, self.fake_vif)

    @mock.patch.object(vmops.VMOps, '_get_vm_opaque_ref')
    def test_attach_interface_plug_failed(self, mock_get_vm_opaque_ref):
        # A plug failure propagates, and the VIF is unplugged again.
        mock_get_vm_opaque_ref.return_value = 'fake_vm_ref'
        with mock.patch.object(self._session.VM, 'get_allowed_VIF_devices')\
                as fake_devices:
            fake_devices.return_value = [2, 3, 4]
            self.vmops.vif_driver.plug.side_effect =\
                exception.VirtualInterfacePlugException('Failed to plug VIF')
            self.assertRaises(exception.VirtualInterfacePlugException,
                              self.vmops.attach_interface,
                              self.fake_instance, self.fake_vif)
            self.vmops.vif_driver.plug.assert_called_once_with(
                self.fake_instance, self.fake_vif, vm_ref='fake_vm_ref',
                device=2)
            self.vmops.vif_driver.unplug.assert_called_once_with(
                self.fake_instance, self.fake_vif, 'fake_vm_ref')

    @mock.patch.object(vmops.VMOps, '_get_vm_opaque_ref')
    def test_attach_interface_reraise_exception(self, mock_get_vm_opaque_ref):
        # Even when the cleanup unplug fails too, the ORIGINAL plug
        # exception is the one re-raised to the caller.
        mock_get_vm_opaque_ref.return_value = 'fake_vm_ref'
        with mock.patch.object(self._session.VM, 'get_allowed_VIF_devices')\
                as fake_devices:
            fake_devices.return_value = [2, 3, 4]
            self.vmops.vif_driver.plug.side_effect =\
                exception.VirtualInterfacePlugException('Failed to plug VIF')
            self.vmops.vif_driver.unplug.side_effect =\
                exception.VirtualInterfaceUnplugException(
                    'Failed to unplug VIF')
            ex = self.assertRaises(exception.VirtualInterfacePlugException,
                                   self.vmops.attach_interface,
                                   self.fake_instance, self.fake_vif)
            self.assertEqual('Failed to plug VIF', six.text_type(ex))
            self.vmops.vif_driver.plug.assert_called_once_with(
                self.fake_instance, self.fake_vif, vm_ref='fake_vm_ref',
                device=2)
            self.vmops.vif_driver.unplug.assert_called_once_with(
                self.fake_instance, self.fake_vif, 'fake_vm_ref')

    @mock.patch.object(vmops.VMOps, '_get_vm_opaque_ref')
    def test_detach_interface(self, mock_get_vm_opaque_ref):
        # Detach resolves the vm_ref and delegates to the vif driver.
        mock_get_vm_opaque_ref.return_value = 'fake_vm_ref'
        self.vmops.detach_interface(self.fake_instance, self.fake_vif)
        mock_get_vm_opaque_ref.assert_called_once_with(self.fake_instance)
        self.vmops.vif_driver.unplug.assert_called_once_with(
            self.fake_instance, self.fake_vif, 'fake_vm_ref')

    @mock.patch.object(vmops.VMOps, '_get_vm_opaque_ref')
    def test_detach_interface_exception(self, mock_get_vm_opaque_ref):
        # Unplug failures propagate unchanged.
        mock_get_vm_opaque_ref.return_value = 'fake_vm_ref'
        self.vmops.vif_driver.unplug.side_effect =\
            exception.VirtualInterfaceUnplugException('Failed to unplug VIF')
        self.assertRaises(exception.VirtualInterfaceUnplugException,
                          self.vmops.detach_interface,
                          self.fake_instance, self.fake_vif)
| apache-2.0 |
Peratham/tweater | py/nltk/cluster/kmeans.py | 4 | 7550 | # Natural Language Toolkit: K-Means Clusterer
#
# Copyright (C) 2001-2011 NLTK Project
# Author: Trevor Cohn <tacohn@cs.mu.oz.au>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
import numpy
import random
from api import *
from util import *
class KMeansClusterer(VectorSpaceClusterer):
"""
The K-means clusterer starts with k arbitrary chosen means then allocates
each vector to the cluster with the closest mean. It then recalculates the
means of each cluster as the centroid of the vectors in the cluster. This
process repeats until the cluster memberships stabilise. This is a
hill-climbing algorithm which may converge to a local maximum. Hence the
clustering is often repeated with random initial means and the most
    commonly occurring output means are chosen.
"""
def __init__(self, num_means, distance, repeats=1,
conv_test=1e-6, initial_means=None,
normalise=False, svd_dimensions=None,
rng=None):
"""
@param num_means: the number of means to use (may use fewer)
@type num_means: int
@param distance: measure of distance between two vectors
@type distance: function taking two vectors and returing a float
@param repeats: number of randomised clustering trials to use
@type repeats: int
@param conv_test: maximum variation in mean differences before
deemed convergent
@type conv_test: number
@param initial_means: set of k initial means
@type initial_means: sequence of vectors
@param normalise: should vectors be normalised to length 1
@type normalise: boolean
@param svd_dimensions: number of dimensions to use in reducing vector
dimensionsionality with SVD
@type svd_dimensions: int
@param rng: random number generator (or None)
@type rng: Random
"""
VectorSpaceClusterer.__init__(self, normalise, svd_dimensions)
self._num_means = num_means
self._distance = distance
self._max_difference = conv_test
assert not initial_means or len(initial_means) == num_means
self._means = initial_means
assert repeats >= 1
assert not (initial_means and repeats > 1)
self._repeats = repeats
if rng: self._rng = rng
else: self._rng = random.Random()
def cluster_vectorspace(self, vectors, trace=False):
if self._means and self._repeats > 1:
print 'Warning: means will be discarded for subsequent trials'
meanss = []
for trial in range(self._repeats):
if trace: print 'k-means trial', trial
if not self._means or trial > 1:
self._means = self._rng.sample(vectors, self._num_means)
self._cluster_vectorspace(vectors, trace)
meanss.append(self._means)
if len(meanss) > 1:
# sort the means first (so that different cluster numbering won't
# effect the distance comparison)
for means in meanss:
means.sort(cmp = _vector_compare)
# find the set of means that's minimally different from the others
min_difference = min_means = None
for i in range(len(meanss)):
d = 0
for j in range(len(meanss)):
if i != j:
d += self._sum_distances(meanss[i], meanss[j])
if min_difference == None or d < min_difference:
min_difference, min_means = d, meanss[i]
# use the best means
self._means = min_means
def _cluster_vectorspace(self, vectors, trace=False):
if self._num_means < len(vectors):
# perform k-means clustering
converged = False
while not converged:
# assign the tokens to clusters based on minimum distance to
# the cluster means
clusters = [[] for m in range(self._num_means)]
for vector in vectors:
index = self.classify_vectorspace(vector)
clusters[index].append(vector)
if trace: print 'iteration'
#for i in range(self._num_means):
#print ' mean', i, 'allocated', len(clusters[i]), 'vectors'
# recalculate cluster means by computing the centroid of each cluster
new_means = map(self._centroid, clusters)
# measure the degree of change from the previous step for convergence
difference = self._sum_distances(self._means, new_means)
if difference < self._max_difference:
converged = True
# remember the new means
self._means = new_means
def classify_vectorspace(self, vector):
# finds the closest cluster centroid
# returns that cluster's index
best_distance = best_index = None
for index in range(len(self._means)):
mean = self._means[index]
dist = self._distance(vector, mean)
if best_distance == None or dist < best_distance:
best_index, best_distance = index, dist
return best_index
def num_clusters(self):
if self._means:
return len(self._means)
else:
return self._num_means
def means(self):
"""
The means used for clustering.
"""
return self._means
def _sum_distances(self, vectors1, vectors2):
difference = 0.0
for u, v in zip(vectors1, vectors2):
difference += self._distance(u, v)
return difference
def _centroid(self, cluster):
assert len(cluster) > 0
centroid = copy.copy(cluster[0])
for vector in cluster[1:]:
centroid += vector
return centroid / float(len(cluster))
def __repr__(self):
return '<KMeansClusterer means=%s repeats=%d>' % \
(self._means, self._repeats)
def _vector_compare(x, y):
xs, ys = sum(x), sum(y)
if xs < ys: return -1
elif xs > ys: return 1
else: return 0
#################################################################################
def demo():
    """Exercise KMeansClusterer on two tiny 2-D data sets: once with fixed
    initial means, once with 10 randomised repeats, then classify a point."""
    # example from figure 14.9, page 517, Manning and Schutze
    from nltk import cluster
    vectors = [numpy.array(f) for f in [[2, 1], [1, 3], [4, 7], [6, 7]]]
    means = [[4, 3], [5, 5]]
    clusterer = cluster.KMeansClusterer(2, euclidean_distance, initial_means=means)
    clusters = clusterer.cluster(vectors, True, trace=True)
    print 'Clustered:', vectors
    print 'As:', clusters
    print 'Means:', clusterer.means()
    print
    vectors = [numpy.array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]]
    # test k-means using the euclidean distance metric, 2 means and repeat
    # clustering 10 times with random seeds
    clusterer = cluster.KMeansClusterer(2, euclidean_distance, repeats=10)
    clusters = clusterer.cluster(vectors, True)
    print 'Clustered:', vectors
    print 'As:', clusters
    print 'Means:', clusterer.means()
    print
    # classify a new vector
    vector = numpy.array([3, 3])
    print 'classify(%s):' % vector,
    print clusterer.classify(vector)
    print
if __name__ == '__main__':
demo()
| gpl-3.0 |
steedos/odoo | addons/l10n_ch/account_wizard.py | 424 | 2192 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi. Copyright Camptocamp SA
# Financial contributors: Hasa SA, Open Net SA,
# Prisme Solutions Informatique SA, Quod SA
#
# Translation contributors: brain-tec AG, Agile Business Group
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv.orm import TransientModel
class WizardMultiChartsAccounts(TransientModel):
    """Swiss (l10n_ch) extension of the multi-chart account setup wizard."""
    _inherit = 'wizard.multi.charts.accounts'

    def onchange_chart_template_id(self, cursor, uid, ids, chart_template_id=False, context=None):
        """Delegate to the base onchange, then force code_digits to 0 when
        the Sterchi chart template is selected.

        0 evaluates as False in Python, and the original wizard tests
        code_digits on a float widget, hence the explicit override here.
        """
        context = context if context is not None else {}
        res = super(WizardMultiChartsAccounts, self).onchange_chart_template_id(
            cursor, uid, ids, chart_template_id=chart_template_id, context=context)
        if not chart_template_id:
            return res
        data_obj = self.pool.get('ir.model.data')
        sterchi_template = data_obj.get_object(cursor, uid, 'l10n_ch', 'l10nch_chart_template')
        if sterchi_template.id == chart_template_id:
            res['value']['code_digits'] = 0
        return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
supersam654/gatech-maintenance-requests | scripts/summarizer.py | 1 | 2330 | #!/usr/bin/env python
import json
from profanity import profanity
# Too many false positives with "cockroaches".
profanity.get_words()
profanity.words.remove('cock')
import db
# note that this file is relative to the root of the project, not this file.
OUT_FILE = 'site/frontend_data/summaries.json'
def summarize_by_year():
    """Count maintenance requests per calendar year of their ack date.

    Returns a list of {'year': <int>, 'count': <int>} dicts.
    """
    extract_year = {'$project': {'year': {'$year': '$ack_date'}}}
    count_per_year = {'$group': {'_id': '$year', 'count': {'$sum': 1}}}
    rename_fields = {'$project': {'_id': False, 'year': '$_id', 'count': '$count'}}
    return list(db.requests.aggregate([extract_year, count_per_year, rename_fields]))
def summarize_by_month():
    """Count maintenance requests per month-of-year of their ack date.

    Returns a list of {'month': <int>, 'count': <int>} dicts.
    """
    extract_month = {'$project': {'month': {'$month': '$ack_date'}}}
    count_per_month = {'$group': {'_id': '$month', 'count': {'$sum': 1}}}
    rename_fields = {'$project': {'_id': False, 'month': '$_id', 'count': '$count'}}
    return list(db.requests.aggregate([extract_month, count_per_month, rename_fields]))
def summarize_by_building():
    """Count maintenance requests per building.

    Returns a list of {'building': <str>, 'count': <int>} dicts.
    """
    keep_building = {'$project': {'building': '$building'}}
    count_per_building = {'$group': {'_id': '$building', 'count': {'$sum': 1}}}
    rename_fields = {'$project': {'_id': False, 'building': '$_id', 'count': '$count'}}
    return list(db.requests.aggregate([keep_building, count_per_building, rename_fields]))
def summarize_stats():
    """Compute overall request statistics.

    Returns {'count': total requests, 'profane_requests': number of requests
    whose requested_action contains profanity}.  Scans every document, so
    this can be slow on large collections.
    """
    stats = {'count': db.requests.count()}
    stats['profane_requests'] = sum(
        1 for request in db.requests.find()
        if profanity.contains_profanity(request['requested_action'])
    )
    return stats
def make_summaries():
    """Assemble every summary section into one dict for the frontend JSON."""
    summaries = {
        'by_year': summarize_by_year(),
        'by_month': summarize_by_month(),
        'by_building': summarize_by_building(),
    }
    print('Generating stats. This may take a couple of minutes.')
    summaries['stats'] = summarize_stats()
    return summaries
def main():
    """Generate all summaries and write them to OUT_FILE as JSON."""
    data = make_summaries()
    with open(OUT_FILE, 'w') as out:
        json.dump(data, out)
    print("Wrote summary data to %s" % OUT_FILE)
if __name__ == '__main__':
main()
| mit |
jonasrogert/Quadcopter | visualization/rotating_cube.py | 1 | 7071 | """
Simulation of a rotating 3D Cube
Developed by Leonel Machava <leonelmachava@gmail.com>
http://codeNtronix.com
"""
import sys, math, pygame
from operator import itemgetter
import sys
import os.path
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from quad.motor import calculate_dc_for_motor
class Point3D:
    """A point in 3-D space supporting axis rotations and perspective projection.

    All rotation methods take an angle in degrees and return a NEW Point3D;
    the receiver is never modified.
    """

    def __init__(self, x=0, y=0, z=0):
        self.x, self.y, self.z = float(x), float(y), float(z)

    def rotateX(self, angle):
        """Return a copy of this point rotated *angle* degrees around the X axis."""
        theta = angle * math.pi / 180
        cos_t, sin_t = math.cos(theta), math.sin(theta)
        rotated_y = self.y * cos_t - self.z * sin_t
        rotated_z = self.y * sin_t + self.z * cos_t
        return Point3D(self.x, rotated_y, rotated_z)

    def rotateY(self, angle):
        """Return a copy of this point rotated *angle* degrees around the Y axis."""
        theta = angle * math.pi / 180
        cos_t, sin_t = math.cos(theta), math.sin(theta)
        rotated_z = self.z * cos_t - self.x * sin_t
        rotated_x = self.z * sin_t + self.x * cos_t
        return Point3D(rotated_x, self.y, rotated_z)

    def rotateZ(self, angle):
        """Return a copy of this point rotated *angle* degrees around the Z axis."""
        theta = angle * math.pi / 180
        cos_t, sin_t = math.cos(theta), math.sin(theta)
        rotated_x = self.x * cos_t - self.y * sin_t
        rotated_y = self.x * sin_t + self.y * cos_t
        return Point3D(rotated_x, rotated_y, self.z)

    def project(self, win_width, win_height, fov, viewer_distance):
        """Perspective-project this point onto a 2-D window.

        The returned point's x/y are screen coordinates; z is carried over
        unchanged so callers can still depth-sort.
        """
        scale = fov / (viewer_distance + self.z)
        screen_x = self.x * scale + win_width / 2
        screen_y = -self.y * scale + win_height / 2
        return Point3D(screen_x, screen_y, self.z)
class Simulation:
    """Pygame visualisation of a quadcopter frame whose tilt follows the
    per-motor duty cycles computed by quad.motor.calculate_dc_for_motor."""
    def __init__(self, win_width = 640, win_height = 480):
        pygame.init()
        self.screen = pygame.display.set_mode((win_width, win_height))
        pygame.display.set_caption("Drone")
        self.clock = pygame.time.Clock()
        # 12 corners of a plus-shaped drone outline, all in the z=0 plane.
        self.drone_vertices = [
            Point3D(-1,5,0),
            Point3D(1,5,0),
            Point3D(1,1,0),
            Point3D(5,1,0),
            Point3D(5,-1,0),
            Point3D(1,-1,0),
            Point3D(1,-5,0),
            Point3D(-1,-5,0),
            Point3D(-1,-1,0),
            Point3D(-5,-1,0),
            Point3D(-5,1,0),
            Point3D(-1,1,0),
        ]
        # TODO fix Rotate the initial position of the drone
        for k, v in enumerate(self.drone_vertices):
            self.drone_vertices[k] = v.rotateZ(-45)
        # Static ground plane below the drone (z=3 is "farther" on screen).
        self.floor_vertices = [
            Point3D(-5, -5, 3),
            Point3D(-5, 5, 3),
            Point3D(5, 5, 3),
            Point3D(5, -5, 3),
        ]
        self.vertices = []
        self.vertices.extend(self.drone_vertices)
        self.vertices.extend(self.floor_vertices)
        self.vertices_to_draw = []
        # Define the vertices that compose each of the 6 faces. These numbers are
        # indices to the vertices list defined above.
        self.drone_faces = [(0,1,2,11),(2,3,4,5),(8,9,10,11),(5,6,7,8),(11,2,5,8),]
        self.floor_face = [(12,13,14,15)]
        self.faces = []
        self.faces.extend(self.drone_faces)
        self.faces.extend(self.floor_face)
        # Define colors for each face
        self.colors = [(0, 0, 255), (0, 0, 255), (0, 0, 255), (0, 0, 255),
                       (0, 255, 255), (0, 100, 0)]
        # self.angle = 0
        # Current drone attitude (degrees), animated in run().
        self.angle_x = 0
        self.angle_y = 0
        self.angle_z = 0
        # Fixed camera orientation applied when projecting to screen space.
        self.projection_angle_x = 45
        self.projection_angle_y = 0
        self.projection_angle_z = 0
    def run(self):
        """Main loop: animate pitch/roll, recolour the four motor faces by
        duty cycle, then render with a painter's-algorithm depth sort."""
        iter = 0
        dc = [0, 0, 0, 0]
        """ Main Loop """
        while 1:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.quit()
                    sys.exit()
            self.clock.tick(50)
            self.screen.fill((0, 32, 0))
            rad_x = iter * math.pi / 180
            # NOTE(review): adds 180 to a value in RADIANS; a half-cycle
            # offset would be '+ math.pi' -- confirm intent.
            rad_y = rad_x + 180
            scale_x = math.sin(rad_x)*20
            # print(scale_x)
            scale_y = math.sin(rad_y)*20
            # print(scale_y)
            self.angle_x = scale_x
            self.angle_y = scale_y
            # self.angle += 1
            # self.angle_z += 1
            sensor_values = {
                'yaw': 0,
                'pitch': self.angle_y,
                'roll': self.angle_x,
            }
            for m in range(4):
                dc[m] = calculate_dc_for_motor(m, .5, sensor_values)
                # brighter blue = higher duty cycle for that motor's face
                self.colors[m] = (0, 0, 255*dc[m])
            # print(dc)
            # It will hold transformed vertices.
            t = []
            self.vertices_to_draw = self.vertices[:]
            # Only the drone tilts; the floor (last 4 vertices) stays fixed.
            for k, v in enumerate(self.vertices_to_draw[:-4]):
                # Rotate the drone
                # Rotate the point around X axis, then around Y axis, and finally around Z axis.
                self.vertices_to_draw[k] = v.rotateX(self.angle_x).rotateY(self.angle_y).rotateZ(self.angle_z)
                # Transform the point from 3D to 2D
                # v = r.project(self.screen.get_width(), self.screen.get_height(), 256, 12)
                # Put the point in the list of transformed vertices
                # t.append(p)
            for v in self.vertices_to_draw:
                # Projection
                r = v.rotateX(self.projection_angle_x).rotateY(self.projection_angle_y).rotateZ(self.projection_angle_z)
                # Transform the point from 3D to 2D
                p = r.project(self.screen.get_width(), self.screen.get_height(), 256, 12)
                # Put the point in the list of transformed vertices
                t.append(p)
            # Calculate the average Z values of each face.
            avg_z = []
            i = 0
            for f in self.faces:
                z = (t[f[0]].z + t[f[1]].z + t[f[2]].z + t[f[3]].z) / 4.0
                avg_z.append([i,z])
                i = i + 1
            # Draw the faces using the Painter's algorithm:
            # Distant faces are drawn before the closer ones.
            for tmp in sorted(avg_z,key=itemgetter(1),reverse=True):
                face_index = tmp[0]
                f = self.faces[face_index]
                pointlist = [(t[f[0]].x, t[f[0]].y), (t[f[1]].x, t[f[1]].y),
                             (t[f[1]].x, t[f[1]].y), (t[f[2]].x, t[f[2]].y),
                             (t[f[2]].x, t[f[2]].y), (t[f[3]].x, t[f[3]].y),
                             (t[f[3]].x, t[f[3]].y), (t[f[0]].x, t[f[0]].y)]
                pygame.draw.polygon(self.screen, self.colors[face_index], pointlist)
            pygame.display.flip()
            iter += 1
            iter %= 360
            # print(iter)
if __name__ == "__main__":
Simulation().run()
| gpl-2.0 |
azureplus/hue | desktop/core/ext-py/PyYAML-3.09/lib/yaml/composer.py | 534 | 4921 |
__all__ = ['Composer', 'ComposerError']
from error import MarkedYAMLError
from events import *
from nodes import *
# Raised when the event stream cannot be composed into a valid node tree
# (undefined alias, duplicate anchor, multiple documents, ...).
class ComposerError(MarkedYAMLError):
    pass
class Composer(object):
    """Composes a tree of nodes from the parser's event stream, resolving
    aliases against previously seen anchors.  Sits between the parser
    (check_event/get_event/peek_event) and the resolver in the Loader stack."""
    def __init__(self):
        # anchor name -> already-composed node, used to resolve AliasEvents
        self.anchors = {}
    def check_node(self):
        # Drop the STREAM-START event.
        if self.check_event(StreamStartEvent):
            self.get_event()
        # If there are more documents available?
        return not self.check_event(StreamEndEvent)
    def get_node(self):
        # Get the root node of the next document.
        if not self.check_event(StreamEndEvent):
            return self.compose_document()
    def get_single_node(self):
        """Compose the stream's only document and return its root node
        (None for an empty stream); error out on a second document."""
        # Drop the STREAM-START event.
        self.get_event()
        # Compose a document if the stream is not empty.
        document = None
        if not self.check_event(StreamEndEvent):
            document = self.compose_document()
        # Ensure that the stream contains no more documents.
        if not self.check_event(StreamEndEvent):
            event = self.get_event()
            raise ComposerError("expected a single document in the stream",
                    document.start_mark, "but found another document",
                    event.start_mark)
        # Drop the STREAM-END event.
        self.get_event()
        return document
    def compose_document(self):
        # Drop the DOCUMENT-START event.
        self.get_event()
        # Compose the root node.
        node = self.compose_node(None, None)
        # Drop the DOCUMENT-END event.
        self.get_event()
        # Anchors are document-scoped: forget them between documents.
        self.anchors = {}
        return node
    def compose_node(self, parent, index):
        # An alias must refer to an anchor composed earlier in this document.
        if self.check_event(AliasEvent):
            event = self.get_event()
            anchor = event.anchor
            if anchor not in self.anchors:
                raise ComposerError(None, None, "found undefined alias %r"
                        % anchor.encode('utf-8'), event.start_mark)
            return self.anchors[anchor]
        event = self.peek_event()
        anchor = event.anchor
        if anchor is not None:
            if anchor in self.anchors:
                raise ComposerError("found duplicate anchor %r; first occurence"
                        % anchor.encode('utf-8'), self.anchors[anchor].start_mark,
                        "second occurence", event.start_mark)
        # Tell the resolver where we are so implicit tags resolve correctly.
        self.descend_resolver(parent, index)
        # NOTE(review): if none of the three event types matches here,
        # 'node' is unbound; the parser presumably guarantees one of them.
        if self.check_event(ScalarEvent):
            node = self.compose_scalar_node(anchor)
        elif self.check_event(SequenceStartEvent):
            node = self.compose_sequence_node(anchor)
        elif self.check_event(MappingStartEvent):
            node = self.compose_mapping_node(anchor)
        self.ascend_resolver()
        return node
    def compose_scalar_node(self, anchor):
        event = self.get_event()
        tag = event.tag
        # No tag (or the non-specific '!') means: let the resolver pick one.
        if tag is None or tag == u'!':
            tag = self.resolve(ScalarNode, event.value, event.implicit)
        node = ScalarNode(tag, event.value,
                event.start_mark, event.end_mark, style=event.style)
        if anchor is not None:
            self.anchors[anchor] = node
        return node
    def compose_sequence_node(self, anchor):
        start_event = self.get_event()
        tag = start_event.tag
        if tag is None or tag == u'!':
            tag = self.resolve(SequenceNode, None, start_event.implicit)
        node = SequenceNode(tag, [],
                start_event.start_mark, None,
                flow_style=start_event.flow_style)
        # Register the anchor BEFORE composing children so the sequence can
        # contain aliases to itself (recursive structures).
        if anchor is not None:
            self.anchors[anchor] = node
        index = 0
        while not self.check_event(SequenceEndEvent):
            node.value.append(self.compose_node(node, index))
            index += 1
        end_event = self.get_event()
        node.end_mark = end_event.end_mark
        return node
    def compose_mapping_node(self, anchor):
        start_event = self.get_event()
        tag = start_event.tag
        if tag is None or tag == u'!':
            tag = self.resolve(MappingNode, None, start_event.implicit)
        node = MappingNode(tag, [],
                start_event.start_mark, None,
                flow_style=start_event.flow_style)
        # Registered before children for the same recursive-alias reason.
        if anchor is not None:
            self.anchors[anchor] = node
        while not self.check_event(MappingEndEvent):
            #key_event = self.peek_event()
            item_key = self.compose_node(node, None)
            #if item_key in node.value:
            #    raise ComposerError("while composing a mapping", start_event.start_mark,
            #            "found duplicate key", key_event.start_mark)
            item_value = self.compose_node(node, item_key)
            #node.value[item_key] = item_value
            node.value.append((item_key, item_value))
        end_event = self.get_event()
        node.end_mark = end_event.end_mark
        return node
| apache-2.0 |
eric-stanley/NewsBlur | utils/feedparser.py | 2 | 155430 | """Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
Visit https://code.google.com/p/feedparser/ for the latest version
Visit http://packages.python.org/feedparser/ for the latest documentation
Required: Python 2.4 or later
Recommended: iconv_codec <http://cjkpython.i18n.org/>
"""
__version__ = "5.1.3"
__license__ = """
Copyright (c) 2010-2013 Kurt McKee <contactme@kurtmckee.org>
Copyright (c) 2002-2008 Mark Pilgrim
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
"John Beimler <http://john.beimler.org/>",
"Fazal Majid <http://www.majid.info/mylos/weblog/>",
"Aaron Swartz <http://aaronsw.com/>",
"Kevin Marks <http://epeus.blogspot.com/>",
"Sam Ruby <http://intertwingly.net/>",
"Ade Oshineye <http://blog.oshineye.com/>",
"Martin Pool <http://sourcefrog.net/>",
"Kurt McKee <http://kurtmckee.org/>",
"Bernd Schlapsi <https://github.com/brot>",]
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +https://code.google.com/p/feedparser/" % __version__
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
# If you want feedparser to automatically resolve all relative URIs, set this
# to 1.
RESOLVE_RELATIVE_URIS = 1
# If you want feedparser to automatically sanitize all potentially unsafe
# HTML content, set this to 1.
SANITIZE_HTML = 1
# ---------- Python 3 modules (make it work if possible) ----------
try:
import rfc822
except ImportError:
from email import _parseaddr as rfc822
try:
# Python 3.1 introduces bytes.maketrans and simultaneously
# deprecates string.maketrans; use bytes.maketrans if possible
_maketrans = bytes.maketrans
except (NameError, AttributeError):
import string
_maketrans = string.maketrans
# base64 support for Atom feeds that contain embedded binary data
try:
import base64, binascii
except ImportError:
base64 = binascii = None
else:
# Python 3.1 deprecates decodestring in favor of decodebytes
_base64decode = getattr(base64, 'decodebytes', base64.decodestring)
# _s2bytes: convert a UTF-8 str to bytes if the interpreter is Python 3
# _l2bytes: convert a list of ints to bytes if the interpreter is Python 3
try:
if bytes is str:
# In Python 2.5 and below, bytes doesn't exist (NameError)
# In Python 2.6 and above, bytes and str are the same type
raise NameError
except NameError:
# Python 2
def _s2bytes(s):
return s
def _l2bytes(l):
return ''.join(map(chr, l))
else:
# Python 3
def _s2bytes(s):
return bytes(s, 'utf8')
def _l2bytes(l):
return bytes(l)
# If you want feedparser to allow all URL schemes, set this to ()
# List culled from Python's urlparse documentation at:
# http://docs.python.org/library/urlparse.html
# as well as from "URI scheme" at Wikipedia:
# https://secure.wikimedia.org/wikipedia/en/wiki/URI_scheme
# Many more will likely need to be added!
ACCEPTABLE_URI_SCHEMES = (
'file', 'ftp', 'gopher', 'h323', 'hdl', 'http', 'https', 'imap', 'magnet',
'mailto', 'mms', 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu',
'sftp', 'shttp', 'sip', 'sips', 'snews', 'svn', 'svn+ssh', 'telnet',
'wais',
# Additional common-but-unofficial schemes
'aim', 'callto', 'cvs', 'facetime', 'feed', 'git', 'gtalk', 'irc', 'ircs',
'irc6', 'itms', 'mms', 'msnim', 'skype', 'ssh', 'smb', 'svn', 'ymsg',
)
#ACCEPTABLE_URI_SCHEMES = ()
# ---------- required modules (should come with any Python distribution) ----------
import cgi
import codecs
import copy
import datetime
import itertools
import re
import struct
import time
import types
import urllib
import urllib2
import urlparse
import warnings
from htmlentitydefs import name2codepoint, codepoint2name, entitydefs
try:
from io import BytesIO as _StringIO
except ImportError:
try:
from cStringIO import StringIO as _StringIO
except ImportError:
from StringIO import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
import gzip
except ImportError:
gzip = None
try:
import zlib
except ImportError:
zlib = None
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
import xml.sax
from xml.sax.saxutils import escape as _xmlescape
except ImportError:
_XML_AVAILABLE = 0
def _xmlescape(data,entities={}):
data = data.replace('&', '&')
data = data.replace('>', '>')
data = data.replace('<', '<')
for char, entity in entities:
data = data.replace(char, entity)
return data
else:
try:
xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
except xml.sax.SAXReaderNotAvailable:
_XML_AVAILABLE = 0
else:
_XML_AVAILABLE = 1
# sgmllib is not available by default in Python 3; if the end user doesn't have
# it available then we'll lose illformed XML parsing and content santizing
try:
import sgmllib
except ImportError:
# This is probably Python 3, which doesn't include sgmllib anymore
_SGML_AVAILABLE = 0
# Mock sgmllib enough to allow subclassing later on
class sgmllib(object):
class SGMLParser(object):
def goahead(self, i):
pass
def parse_starttag(self, i):
pass
else:
_SGML_AVAILABLE = 1
# sgmllib defines a number of module-level regular expressions that are
# insufficient for the XML parsing feedparser needs. Rather than modify
# the variables directly in sgmllib, they're defined here using the same
# names, and the compiled code objects of several sgmllib.SGMLParser
# methods are copied into _BaseHTMLProcessor so that they execute in
# feedparser's scope instead of sgmllib's scope.
charref = re.compile('&#(\d+|[xX][0-9a-fA-F]+);')
tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
attrfind = re.compile(
r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)[$]?(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?'
)
# Unfortunately, these must be copied over to prevent NameError exceptions
entityref = sgmllib.entityref
incomplete = sgmllib.incomplete
interesting = sgmllib.interesting
shorttag = sgmllib.shorttag
shorttagopen = sgmllib.shorttagopen
starttagopen = sgmllib.starttagopen
    class _EndBracketRegEx:
        """Drop-in replacement for sgmllib's 'endbracket' regex object."""
        def __init__(self):
            # Overriding the built-in sgmllib.endbracket regex allows the
            # parser to find angle brackets embedded in element attributes.
            self.endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''')
        def search(self, target, index=0):
            # sgmllib calls search(); delegate to match() anchored at index.
            match = self.endbracket.match(target, index)
            if match is not None:
                # Returning a new object in the calling thread's context
                # resolves a thread-safety issue.
                return EndBracketMatch(match)
            return None
    class EndBracketMatch:
        # Minimal wrapper exposing the interface sgmllib expects from a
        # regex match.  NOTE(review): start(n) returns match.end(n) --
        # presumably deliberate, paired with _EndBracketRegEx.search();
        # confirm before changing.
        def __init__(self, match):
            self.match = match
        def start(self, n):
            return self.match.end(n)
endbracket = _EndBracketRegEx()
# iconv_codec provides support for more character encodings.
# It's available from http://cjkpython.i18n.org/
try:
import iconv_codec
except ImportError:
pass
# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
try:
import chardet
except ImportError:
chardet = None
# ---------- don't touch these ----------
# Internal exception types raised while sniffing encodings and content
# types; presumably surfaced to callers on the parse result rather than
# aborting the parse -- see how the rest of the module uses them.
class ThingsNobodyCaresAboutButMe(Exception): pass
# The document's declared character encoding was overridden during parsing.
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
# No workable character encoding could be determined for the document.
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
# The HTTP Content-Type was not an XML media type.
class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
# An XML namespace prefix was used without being declared.
class UndeclaredNamespace(Exception): pass
SUPPORTED_VERSIONS = {'': u'unknown',
'rss090': u'RSS 0.90',
'rss091n': u'RSS 0.91 (Netscape)',
'rss091u': u'RSS 0.91 (Userland)',
'rss092': u'RSS 0.92',
'rss093': u'RSS 0.93',
'rss094': u'RSS 0.94',
'rss20': u'RSS 2.0',
'rss10': u'RSS 1.0',
'rss': u'RSS (unknown version)',
'atom01': u'Atom 0.1',
'atom02': u'Atom 0.2',
'atom03': u'Atom 0.3',
'atom10': u'Atom 1.0',
'atom': u'Atom (unknown version)',
'cdf': u'CDF',
}
class FeedParserDict(dict):
keymap = {'channel': 'feed',
'items': 'entries',
'guid': 'id',
'date': 'updated',
'date_parsed': 'updated_parsed',
'description': ['summary', 'subtitle'],
'description_detail': ['summary_detail', 'subtitle_detail'],
'url': ['href'],
'modified': 'updated',
'modified_parsed': 'updated_parsed',
'issued': 'published',
'issued_parsed': 'published_parsed',
'copyright': 'rights',
'copyright_detail': 'rights_detail',
'tagline': 'subtitle',
'tagline_detail': 'subtitle_detail'}
def __getitem__(self, key):
if key == 'category':
try:
return dict.__getitem__(self, 'tags')[0]['term']
except IndexError:
raise KeyError, "object doesn't have key 'category'"
elif key == 'enclosures':
norel = lambda link: FeedParserDict([(name,value) for (name,value) in link.items() if name!='rel'])
return [norel(link) for link in dict.__getitem__(self, 'links') if link['rel']==u'enclosure']
elif key == 'license':
for link in dict.__getitem__(self, 'links'):
if link['rel']==u'license' and 'href' in link:
return link['href']
elif key == 'updated':
# Temporarily help developers out by keeping the old
# broken behavior that was reported in issue 310.
# This fix was proposed in issue 328.
if not dict.__contains__(self, 'updated') and \
dict.__contains__(self, 'published'):
warnings.warn("To avoid breaking existing software while "
"fixing issue 310, a temporary mapping has been created "
"from `updated` to `published` if `updated` doesn't "
"exist. This fallback will be removed in a future version "
"of feedparser.", DeprecationWarning)
return dict.__getitem__(self, 'published')
return dict.__getitem__(self, 'updated')
elif key == 'updated_parsed':
if not dict.__contains__(self, 'updated_parsed') and \
dict.__contains__(self, 'published_parsed'):
warnings.warn("To avoid breaking existing software while "
"fixing issue 310, a temporary mapping has been created "
"from `updated_parsed` to `published_parsed` if "
"`updated_parsed` doesn't exist. This fallback will be "
"removed in a future version of feedparser.",
DeprecationWarning)
return dict.__getitem__(self, 'published_parsed')
return dict.__getitem__(self, 'updated_parsed')
else:
realkey = self.keymap.get(key, key)
if isinstance(realkey, list):
for k in realkey:
if dict.__contains__(self, k):
return dict.__getitem__(self, k)
elif dict.__contains__(self, realkey):
return dict.__getitem__(self, realkey)
return dict.__getitem__(self, key)
def __contains__(self, key):
if key in ('updated', 'updated_parsed'):
# Temporarily help developers out by keeping the old
# broken behavior that was reported in issue 310.
# This fix was proposed in issue 328.
return dict.__contains__(self, key)
try:
self.__getitem__(key)
except KeyError:
return False
else:
return True
has_key = __contains__
def get(self, key, default=None):
try:
return self.__getitem__(key)
except KeyError:
return default
def __setitem__(self, key, value):
key = self.keymap.get(key, key)
if isinstance(key, list):
key = key[0]
return dict.__setitem__(self, key, value)
def setdefault(self, key, value):
if key not in self:
self[key] = value
return value
return self[key]
def __getattr__(self, key):
# __getattribute__() is called first; this will be called
# only if an attribute was not already found
try:
return self.__getitem__(key)
except KeyError:
raise AttributeError, "object has no attribute '%s'" % key
    def __hash__(self):
        # Hash by object identity: the dict contents are mutable, so a
        # value-based hash would be unsafe.
        return id(self)
# Windows-1252 assigns printable characters to the 0x80-0x9F range that
# ISO-8859-1 reserves for C1 control codes.  This table maps those code
# points to the characters cp1252 actually means; it is applied with
# unicode.translate() to repair text mis-decoded as latin-1.
_cp1252 = {
    128: unichr(8364), # euro sign
    130: unichr(8218), # single low-9 quotation mark
    131: unichr( 402), # latin small letter f with hook
    132: unichr(8222), # double low-9 quotation mark
    133: unichr(8230), # horizontal ellipsis
    134: unichr(8224), # dagger
    135: unichr(8225), # double dagger
    136: unichr( 710), # modifier letter circumflex accent
    137: unichr(8240), # per mille sign
    138: unichr( 352), # latin capital letter s with caron
    139: unichr(8249), # single left-pointing angle quotation mark
    140: unichr( 338), # latin capital ligature oe
    142: unichr( 381), # latin capital letter z with caron
    145: unichr(8216), # left single quotation mark
    146: unichr(8217), # right single quotation mark
    147: unichr(8220), # left double quotation mark
    148: unichr(8221), # right double quotation mark
    149: unichr(8226), # bullet
    150: unichr(8211), # en dash
    151: unichr(8212), # em dash
    152: unichr( 732), # small tilde
    153: unichr(8482), # trade mark sign
    154: unichr( 353), # latin small letter s with caron
    155: unichr(8250), # single right-pointing angle quotation mark
    156: unichr( 339), # latin small ligature oe
    158: unichr( 382), # latin small letter z with caron
    159: unichr( 376), # latin capital letter y with diaeresis
}
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
    """Join *uri* to *base*, tolerating byte strings and bad input.

    Extra slashes after the scheme are collapsed first; a ValueError
    from urljoin yields an empty unicode string.
    """
    uri = _urifixer.sub(r'\1\3', uri)
    if not isinstance(uri, unicode):
        uri = uri.decode('utf-8', 'ignore')
    try:
        joined = urlparse.urljoin(base, uri)
    except ValueError:
        joined = u''
    if isinstance(joined, unicode):
        return joined
    return joined.decode('utf-8', 'ignore')
class _FeedParserMixin:
    """Element-handling logic shared by the strict and loose parsers.

    Subclasses feed SAX/SGML events into unknown_starttag() and friends;
    this mixin accumulates results in self.feeddata / self.entries.
    """

    # Known namespace URI -> canonical element prefix.  An empty string
    # means elements from that namespace are treated as core feed
    # elements (no prefix).
    namespaces = {
        '': '',
        'http://backend.userland.com/rss': '',
        'http://blogs.law.harvard.edu/tech/rss': '',
        'http://purl.org/rss/1.0/': '',
        'http://my.netscape.com/rdf/simple/0.9/': '',
        'http://example.com/newformat#': '',
        'http://example.com/necho': '',
        'http://purl.org/echo/': '',
        'uri/of/echo/namespace#': '',
        'http://purl.org/pie/': '',
        'http://purl.org/atom/ns#': '',
        'http://www.w3.org/2005/Atom': '',
        'http://purl.org/rss/1.0/modules/rss091#': '',
        'http://webns.net/mvcb/': 'admin',
        'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
        'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
        'http://media.tangent.org/rss/1.0/': 'audio',
        'http://backend.userland.com/blogChannelModule': 'blogChannel',
        'http://web.resource.org/cc/': 'cc',
        'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
        'http://purl.org/rss/1.0/modules/company': 'co',
        'http://purl.org/rss/1.0/modules/content/': 'content',
        'http://my.theinfo.org/changed/1.0/rss/': 'cp',
        'http://purl.org/dc/elements/1.1/': 'dc',
        'http://purl.org/dc/terms/': 'dcterms',
        'http://purl.org/rss/1.0/modules/email/': 'email',
        'http://purl.org/rss/1.0/modules/event/': 'ev',
        'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
        'http://freshmeat.net/rss/fm/': 'fm',
        'http://xmlns.com/foaf/0.1/': 'foaf',
        'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
        'http://www.georss.org/georss': 'georss',
        'http://www.opengis.net/gml': 'gml',
        'http://postneo.com/icbm/': 'icbm',
        'http://purl.org/rss/1.0/modules/image/': 'image',
        'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
        'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
        'http://purl.org/rss/1.0/modules/link/': 'l',
        'http://search.yahoo.com/mrss': 'media',
        # Version 1.1.2 of the Media RSS spec added the trailing slash on the namespace
        'http://search.yahoo.com/mrss/': 'media',
        'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
        'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
        'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
        'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
        'http://purl.org/rss/1.0/modules/reference/': 'ref',
        'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
        'http://purl.org/rss/1.0/modules/search/': 'search',
        'http://purl.org/rss/1.0/modules/slash/': 'slash',
        'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
        'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
        'http://hacks.benhammersley.com/rss/streaming/': 'str',
        'http://purl.org/rss/1.0/modules/subscription/': 'sub',
        'http://purl.org/rss/1.0/modules/syndication/': 'sy',
        'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf',
        'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
        'http://purl.org/rss/1.0/modules/threading/': 'thr',
        'http://purl.org/rss/1.0/modules/textinput/': 'ti',
        'http://madskills.com/public/xml/rss/module/trackback/': 'trackback',
        'http://wellformedweb.org/commentAPI/': 'wfw',
        'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
        'http://www.w3.org/1999/xhtml': 'xhtml',
        'http://www.w3.org/1999/xlink': 'xlink',
        'http://www.w3.org/XML/1998/namespace': 'xml',
        'http://podlove.org/simple-chapters': 'psc',
    }
    # lowercased-URI lookup table, filled lazily by __init__()
    _matchnamespaces = {}
    # element names whose text may itself be a relative URI
    can_be_relative_uri = set(['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'icon', 'logo'])
    # element names whose embedded markup may contain relative URIs
    can_contain_relative_uris = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
    # element names whose embedded markup must be sanitized
    can_contain_dangerous_markup = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
    # content types treated as HTML-ish markup
    html_types = [u'text/html', u'application/xhtml+xml']
    def __init__(self, baseuri=None, baselang=None, encoding=u'utf-8'):
        """Initialize parser state.

        baseuri  -- base URI used to resolve relative links
        baselang -- default language, also recorded as the feed language
        encoding -- character encoding of the document
        """
        if not self._matchnamespaces:
            # lazily populate the class-level lowercase namespace table
            for k, v in self.namespaces.items():
                self._matchnamespaces[k.lower()] = v
        self.feeddata = FeedParserDict() # feed-level data
        self.encoding = encoding # character encoding
        self.entries = [] # list of entry-level data
        self.version = u'' # feed type/version, see SUPPORTED_VERSIONS
        self.namespacesInUse = {} # dictionary of namespaces defined by the feed
        # the following are used internally to track state;
        # this is really out of control and should be refactored
        self.infeed = 0
        self.inentry = 0
        self.incontent = 0
        self.intextinput = 0
        self.inimage = 0
        self.inauthor = 0
        self.incontributor = 0
        self.inpublisher = 0
        self.insource = 0
        # georss
        self.ingeometry = 0
        self.sourcedata = FeedParserDict()
        self.contentparams = FeedParserDict()
        self._summaryKey = None
        self.namespacemap = {}
        self.elementstack = []
        self.basestack = []
        self.langstack = []
        self.baseuri = baseuri or u''
        self.lang = baselang or None
        self.svgOK = 0
        self.title_depth = -1
        self.depth = 0
        # psc_chapters_flag prevents multiple psc_chapters from being
        # captured in a single entry or item. The transition states are
        # None -> True -> False. psc_chapter elements will only be
        # captured while it is True.
        self.psc_chapters_flag = None
        if baselang:
            self.feeddata['language'] = baselang.replace('_','-')
        # A map of the following form:
        #     {
        #         object_that_value_is_set_on: {
        #             property_name: depth_of_node_property_was_extracted_from,
        #             other_property: depth_of_node_property_was_extracted_from,
        #         },
        #     }
        self.property_depth_map = {}
def _normalize_attributes(self, kv):
k = kv[0].lower()
v = k in ('rel', 'type') and kv[1].lower() or kv[1]
# the sgml parser doesn't handle entities in attributes, nor
# does it pass the attribute values through as unicode, while
# strict xml parsers do -- account for this difference
if isinstance(self, _LooseFeedParser):
v = v.replace('&', '&')
if not isinstance(v, unicode):
v = v.decode('utf-8')
return (k, v)
    def unknown_starttag(self, tag, attrs):
        """Dispatch an opening tag to its `_start_*` handler.

        Also tracks depth, xml:base, xml:lang and namespace
        declarations, and echoes raw markup while inside inline XHTML
        content.
        """
        # increment depth counter
        self.depth += 1
        # normalize attrs
        attrs = map(self._normalize_attributes, attrs)
        # track xml:base and xml:lang
        attrsD = dict(attrs)
        baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
        if not isinstance(baseuri, unicode):
            baseuri = baseuri.decode(self.encoding, 'ignore')
        # ensure that self.baseuri is always an absolute URI that
        # uses a whitelisted URI scheme (e.g. not `javscript:`)
        if self.baseuri:
            self.baseuri = _makeSafeAbsoluteURI(self.baseuri, baseuri) or self.baseuri
        else:
            self.baseuri = _urljoin(self.baseuri, baseuri)
        lang = attrsD.get('xml:lang', attrsD.get('lang'))
        if lang == '':
            # xml:lang could be explicitly set to '', we need to capture that
            lang = None
        elif lang is None:
            # if no xml:lang is specified, use parent lang
            lang = self.lang
        if lang:
            if tag in ('feed', 'rss', 'rdf:RDF'):
                self.feeddata['language'] = lang.replace('_','-')
        self.lang = lang
        self.basestack.append(self.baseuri)
        self.langstack.append(lang)
        # track namespaces
        for prefix, uri in attrs:
            if prefix.startswith('xmlns:'):
                self.trackNamespace(prefix[6:], uri)
            elif prefix == 'xmlns':
                self.trackNamespace(None, uri)
        # track inline content
        if self.incontent and not self.contentparams.get('type', u'xml').endswith(u'xml'):
            if tag in ('xhtml:div', 'div'):
                return # typepad does this 10/2007
            # element declared itself as escaped markup, but it isn't really
            self.contentparams['type'] = u'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml':
            # inside inline XHTML: re-emit the tag as literal markup
            # (adding xmlns for embedded MathML/SVG islands)
            if tag.find(':') <> -1:
                prefix, tag = tag.split(':', 1)
                namespace = self.namespacesInUse.get(prefix, '')
                if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
                    attrs.append(('xmlns',namespace))
                if tag=='svg' and namespace=='http://www.w3.org/2000/svg':
                    attrs.append(('xmlns',namespace))
            if tag == 'svg':
                self.svgOK += 1
            return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0)
        # match namespaces
        if tag.find(':') <> -1:
            prefix, suffix = tag.split(':', 1)
        else:
            prefix, suffix = '', tag
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'
        # special hack for better tracking of empty textinput/image elements in illformed feeds
        if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
            self.intextinput = 0
        if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
            self.inimage = 0
        # call special handler (if defined) or default handler
        methodname = '_start_' + prefix + suffix
        try:
            method = getattr(self, methodname)
            return method(attrsD)
        except AttributeError:
            # Since there's no handler or something has gone wrong we explicitly add the element and its attributes
            unknown_tag = prefix + suffix
            if len(attrsD) == 0:
                # No attributes so merge it into the enclosing dictionary
                return self.push(unknown_tag, 1)
            else:
                # Has attributes so create it in its own dictionary
                context = self._getContext()
                context[unknown_tag] = attrsD
    def unknown_endtag(self, tag):
        """Dispatch a closing tag to its `_end_*` handler and restore
        the xml:base / xml:lang values that go out of scope with it."""
        # match namespaces
        if tag.find(':') <> -1:
            prefix, suffix = tag.split(':', 1)
        else:
            prefix, suffix = '', tag
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'
        if suffix == 'svg' and self.svgOK:
            self.svgOK -= 1
        # call special handler (if defined) or default handler
        methodname = '_end_' + prefix + suffix
        try:
            # while inside an SVG island, skip handlers and just pop
            if self.svgOK:
                raise AttributeError()
            method = getattr(self, methodname)
            method()
        except AttributeError:
            self.pop(prefix + suffix)
        # track inline content
        if self.incontent and not self.contentparams.get('type', u'xml').endswith(u'xml'):
            # element declared itself as escaped markup, but it isn't really
            if tag in ('xhtml:div', 'div'):
                return # typepad does this 10/2007
            self.contentparams['type'] = u'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml':
            # inside inline XHTML: re-emit the close tag as literal markup
            tag = tag.split(':')[-1]
            self.handle_data('</%s>' % tag, escape=0)
        # track xml:base and xml:lang going out of scope
        if self.basestack:
            self.basestack.pop()
            if self.basestack and self.basestack[-1]:
                self.baseuri = self.basestack[-1]
        if self.langstack:
            self.langstack.pop()
            if self.langstack: # and (self.langstack[-1] is not None):
                self.lang = self.langstack[-1]
        self.depth -= 1
def handle_charref(self, ref):
# called for each character reference, e.g. for ' ', ref will be '160'
if not self.elementstack:
return
ref = ref.lower()
if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
text = '&#%s;' % ref
else:
if ref[0] == 'x':
c = int(ref[1:], 16)
else:
c = int(ref)
text = unichr(c).encode('utf-8')
self.elementstack[-1][2].append(text)
    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        if not self.elementstack:
            return
        if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
            # the five predefined XML entities stay escaped
            text = '&%s;' % ref
        elif ref in self.entities:
            text = self.entities[ref]
            if text.startswith('&#') and text.endswith(';'):
                # the DTD expanded this entity to a character reference;
                # re-dispatch it (upstream behavior -- leaves the text
                # as computed below if the lookup fails again)
                return self.handle_entityref(text)
        else:
            try:
                name2codepoint[ref]
            except KeyError:
                # unknown entity: pass it through unchanged
                text = '&%s;' % ref
            else:
                text = unichr(name2codepoint[ref]).encode('utf-8')
        self.elementstack[-1][2].append(text)
    def handle_data(self, text, escape=1):
        # called for each block of plain text, i.e. outside of any tag and
        # not containing any character or entity references
        if not self.elementstack:
            return
        if escape and self.contentparams.get('type') == u'application/xhtml+xml':
            # inside inline XHTML the data must stay well-formed XML
            text = _xmlescape(text)
        self.elementstack[-1][2].append(text)
    def handle_comment(self, text):
        # called for each comment, e.g. <!-- insert message here -->
        pass
    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        pass
    def handle_decl(self, text):
        # declarations (e.g. DOCTYPE) are ignored at this level
        pass
    def parse_declaration(self, i):
        # override internal declaration handler to handle CDATA blocks
        if self.rawdata[i:i+9] == '<![CDATA[':
            k = self.rawdata.find(']]>', i)
            if k == -1:
                # CDATA block began but didn't finish: consume the rest
                # of the buffer without emitting its partial contents
                k = len(self.rawdata)
                return k
            # emit the CDATA contents escaped, skipping the delimiters
            self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
            return k+3
        else:
            k = self.rawdata.find('>', i)
            if k >= 0:
                return k+1
            else:
                # incomplete declaration: k is -1, signalling the caller
                # to wait for more data
                return k
def mapContentType(self, contentType):
contentType = contentType.lower()
if contentType == 'text' or contentType == 'plain':
contentType = u'text/plain'
elif contentType == 'html':
contentType = u'text/html'
elif contentType == 'xhtml':
contentType = u'application/xhtml+xml'
return contentType
def trackNamespace(self, prefix, uri):
loweruri = uri.lower()
if not self.version:
if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/'):
self.version = u'rss090'
elif loweruri == 'http://purl.org/rss/1.0/':
self.version = u'rss10'
elif loweruri == 'http://www.w3.org/2005/atom':
self.version = u'atom10'
if loweruri.find(u'backend.userland.com/rss') <> -1:
# match any backend.userland.com namespace
uri = u'http://backend.userland.com/rss'
loweruri = uri
if loweruri in self._matchnamespaces:
self.namespacemap[prefix] = self._matchnamespaces[loweruri]
self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
else:
self.namespacesInUse[prefix or ''] = uri
    def resolveURI(self, uri):
        # resolve a possibly-relative URI against the current xml:base
        return _urljoin(self.baseuri or u'', uri)
    def decodeEntities(self, element, data):
        # hook for subclasses to decode character entities; the base
        # implementation is a no-op
        return data
def strattrs(self, attrs):
return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'"'})) for t in attrs])
    def push(self, element, expectingText):
        # open a new element frame: [name, whether text is expected,
        # list of text pieces accumulated so far]
        self.elementstack.append([element, expectingText, []])
def pop(self, element, stripWhitespace=1):
if not self.elementstack:
return
if self.elementstack[-1][0] != element:
return
element, expectingText, pieces = self.elementstack.pop()
if self.version == u'atom10' and self.contentparams.get('type', u'text') == u'application/xhtml+xml':
# remove enclosing child element, but only if it is a <div> and
# only if all the remaining content is nested underneath it.
# This means that the divs would be retained in the following:
# <div>foo</div><div>bar</div>
while pieces and len(pieces)>1 and not pieces[-1].strip():
del pieces[-1]
while pieces and len(pieces)>1 and not pieces[0].strip():
del pieces[0]
if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>':
depth = 0
for piece in pieces[:-1]:
if piece.startswith('</'):
depth -= 1
if depth == 0:
break
elif piece.startswith('<') and not piece.endswith('/>'):
depth += 1
else:
pieces = pieces[1:-1]
# Ensure each piece is a str for Python 3
for (i, v) in enumerate(pieces):
if not isinstance(v, unicode):
pieces[i] = v.decode('utf-8')
output = u''.join(pieces)
if stripWhitespace:
output = output.strip()
if not expectingText:
return output
# decode base64 content
if base64 and self.contentparams.get('base64', 0):
try:
output = _base64decode(output)
except binascii.Error:
pass
except binascii.Incomplete:
pass
except TypeError:
# In Python 3, base64 takes and outputs bytes, not str
# This may not be the most correct way to accomplish this
output = _base64decode(output.encode('utf-8')).decode('utf-8')
# resolve relative URIs
if (element in self.can_be_relative_uri) and output:
# do not resolve guid elements with isPermalink="false"
if not element == 'id' or self.guidislink:
output = self.resolveURI(output)
# decode entities within embedded markup
if not self.contentparams.get('base64', 0):
output = self.decodeEntities(element, output)
# some feed formats require consumers to guess
# whether the content is html or plain text
if not self.version.startswith(u'atom') and self.contentparams.get('type') == u'text/plain':
if self.lookslikehtml(output):
self.contentparams['type'] = u'text/html'
# remove temporary cruft from contentparams
try:
del self.contentparams['mode']
except KeyError:
pass
try:
del self.contentparams['base64']
except KeyError:
pass
is_htmlish = self.mapContentType(self.contentparams.get('type', u'text/html')) in self.html_types
# resolve relative URIs within embedded markup
if is_htmlish and RESOLVE_RELATIVE_URIS:
if element in self.can_contain_relative_uris:
output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', u'text/html'))
# sanitize embedded markup
if is_htmlish and SANITIZE_HTML:
if element in self.can_contain_dangerous_markup:
output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', u'text/html'))
if self.encoding and not isinstance(output, unicode):
output = output.decode(self.encoding, 'ignore')
# address common error where people take data that is already
# utf-8, presume that it is iso-8859-1, and re-encode it.
if self.encoding in (u'utf-8', u'utf-8_INVALID_PYTHON_3') and isinstance(output, unicode):
try:
output = output.encode('iso-8859-1').decode('utf-8')
except (UnicodeEncodeError, UnicodeDecodeError):
pass
# map win-1252 extensions to the proper code points
if isinstance(output, unicode):
output = output.translate(_cp1252)
# categories/tags/keywords/whatever are handled in _end_category
if element == 'category':
return output
if element == 'title' and -1 < self.title_depth <= self.depth:
return output
# store output in appropriate place(s)
if self.inentry and not self.insource:
if element == 'content':
self.entries[-1].setdefault(element, [])
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element].append(contentparams)
elif element == 'link':
if not self.inimage:
# query variables in urls in link elements are improperly
# converted from `?a=1&b=2` to `?a=1&b;=2` as if they're
# unhandled character references. fix this special case.
output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
self.entries[-1][element] = output
if output:
self.entries[-1]['links'][-1]['href'] = output
else:
if element == 'description':
element = 'summary'
old_value_depth = self.property_depth_map.setdefault(self.entries[-1], {}).get(element)
if old_value_depth is None or self.depth <= old_value_depth:
self.property_depth_map[self.entries[-1]][element] = self.depth
self.entries[-1][element] = output
if self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element + '_detail'] = contentparams
elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage):
context = self._getContext()
if element == 'description':
element = 'subtitle'
context[element] = output
if element == 'link':
# fix query variables; see above for the explanation
output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
context[element] = output
context['links'][-1]['href'] = output
elif self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
context[element + '_detail'] = contentparams
return output
    def pushContent(self, tag, attrsD, defaultContentType, expectingText):
        # Enter a content-bearing element: record its type/language/base
        # in contentparams, then open a frame for its text.
        self.incontent += 1
        if self.lang:
            self.lang=self.lang.replace('_','-')
        # contentparams must be assigned before _isBase64 runs, because
        # _isBase64 reads self.contentparams['type']
        self.contentparams = FeedParserDict({
            'type': self.mapContentType(attrsD.get('type', defaultContentType)),
            'language': self.lang,
            'base': self.baseuri})
        self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
        self.push(tag, expectingText)
    def popContent(self, tag):
        # leave a content-bearing element, returning its processed text
        value = self.pop(tag)
        self.incontent -= 1
        self.contentparams.clear()
        return value
# a number of elements in a number of RSS variants are nominally plain
# text, but this is routinely ignored. This is an attempt to detect
# the most common cases. As false positives often result in silent
# data loss, this function errs on the conservative side.
@staticmethod
def lookslikehtml(s):
# must have a close tag or an entity reference to qualify
if not (re.search(r'</(\w+)>',s) or re.search("&#?\w+;",s)):
return
# all tags must be in a restricted subset of valid HTML tags
if filter(lambda t: t.lower() not in _HTMLSanitizer.acceptable_elements,
re.findall(r'</?(\w+)',s)):
return
# all entities must have been defined as valid HTML entities
if filter(lambda e: e not in entitydefs.keys(), re.findall(r'&(\w+);', s)):
return
return 1
def _mapToStandardPrefix(self, name):
colonpos = name.find(':')
if colonpos <> -1:
prefix = name[:colonpos]
suffix = name[colonpos+1:]
prefix = self.namespacemap.get(prefix, prefix)
name = prefix + ':' + suffix
return name
def _getAttribute(self, attrsD, name):
return attrsD.get(self._mapToStandardPrefix(name))
def _isBase64(self, attrsD, contentparams):
if attrsD.get('mode', '') == 'base64':
return 1
if self.contentparams['type'].startswith(u'text/'):
return 0
if self.contentparams['type'].endswith(u'+xml'):
return 0
if self.contentparams['type'].endswith(u'/xml'):
return 0
return 1
def _itsAnHrefDamnIt(self, attrsD):
href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
if href:
try:
del attrsD['url']
except KeyError:
pass
try:
del attrsD['uri']
except KeyError:
pass
attrsD['href'] = href
return attrsD
    def _save(self, key, value, overwrite=False):
        # write into the active context; by default an existing value wins
        context = self._getContext()
        if overwrite:
            context[key] = value
        else:
            context.setdefault(key, value)
def _start_rss(self, attrsD):
versionmap = {'0.91': u'rss091u',
'0.92': u'rss092',
'0.93': u'rss093',
'0.94': u'rss094'}
#If we're here then this is an RSS feed.
#If we don't have a version or have a version that starts with something
#other than RSS then there's been a mistake. Correct it.
if not self.version or not self.version.startswith(u'rss'):
attr_version = attrsD.get('version', '')
version = versionmap.get(attr_version)
if version:
self.version = version
elif attr_version.startswith('2.'):
self.version = u'rss20'
else:
self.version = u'rss'
    def _start_channel(self, attrsD):
        # <channel> marks the start of feed-level data
        self.infeed = 1
        self._cdf_common(attrsD)
    def _cdf_common(self, attrsD):
        # CDF-style metadata arrives as attributes; replay lastmod/href
        # through the normal modified/link element handlers
        if 'lastmod' in attrsD:
            self._start_modified({})
            self.elementstack[-1][-1] = attrsD['lastmod']
            self._end_modified()
        if 'href' in attrsD:
            self._start_link({})
            self.elementstack[-1][-1] = attrsD['href']
            self._end_link()
def _start_feed(self, attrsD):
self.infeed = 1
versionmap = {'0.1': u'atom01',
'0.2': u'atom02',
'0.3': u'atom03'}
if not self.version:
attr_version = attrsD.get('version')
version = versionmap.get(attr_version)
if version:
self.version = version
else:
self.version = u'atom'
    def _end_channel(self):
        # leaving feed-level data
        self.infeed = 0
    _end_feed = _end_channel
    def _start_image(self, attrsD):
        # open a feed-level <image>; entries don't get an image dict
        context = self._getContext()
        if not self.inentry:
            context.setdefault('image', FeedParserDict())
        self.inimage = 1
        self.title_depth = -1
        self.push('image', 0)
    def _end_image(self):
        self.pop('image')
        self.inimage = 0
    def _start_textinput(self, attrsD):
        # open the feed's <textInput> block
        context = self._getContext()
        context.setdefault('textinput', FeedParserDict())
        self.intextinput = 1
        self.title_depth = -1
        self.push('textinput', 0)
    _start_textInput = _start_textinput
    def _end_textinput(self):
        self.pop('textinput')
        self.intextinput = 0
    _end_textInput = _end_textinput
    def _start_author(self, attrsD):
        # enter an author scope and open a fresh authors[] slot
        self.inauthor = 1
        self.push('author', 1)
        # Append a new FeedParserDict when expecting an author
        context = self._getContext()
        context.setdefault('authors', [])
        context['authors'].append(FeedParserDict())
    _start_managingeditor = _start_author
    _start_dc_author = _start_author
    _start_dc_creator = _start_author
    _start_itunes_author = _start_author
    def _end_author(self):
        self.pop('author')
        self.inauthor = 0
        self._sync_author_detail()
    _end_managingeditor = _end_author
    _end_dc_author = _end_author
    _end_dc_creator = _end_author
    _end_itunes_author = _end_author
    def _start_itunes_owner(self, attrsD):
        # itunes <owner> data is stored under the 'publisher' key
        self.inpublisher = 1
        self.push('publisher', 0)
    def _end_itunes_owner(self):
        self.pop('publisher')
        self.inpublisher = 0
        self._sync_author_detail('publisher')
    def _start_contributor(self, attrsD):
        # enter a contributor scope and open a fresh contributors[] slot
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('contributor', 0)
    def _end_contributor(self):
        self.pop('contributor')
        self.incontributor = 0
    def _start_dc_contributor(self, attrsD):
        # dc:contributor carries a bare name rather than sub-elements,
        # so push a 'name' frame directly
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('name', 0)
    def _end_dc_contributor(self):
        self._end_name()
        self.incontributor = 0
    def _start_name(self, attrsD):
        self.push('name', 0)
    _start_itunes_name = _start_name
    def _end_name(self):
        # route the name to whichever scope we are currently inside
        value = self.pop('name')
        if self.inpublisher:
            self._save_author('name', value, 'publisher')
        elif self.inauthor:
            self._save_author('name', value)
        elif self.incontributor:
            self._save_contributor('name', value)
        elif self.intextinput:
            context = self._getContext()
            context['name'] = value
    _end_itunes_name = _end_name
    def _start_width(self, attrsD):
        self.push('width', 0)
    def _end_width(self):
        # non-numeric widths collapse to 0
        value = self.pop('width')
        try:
            value = int(value)
        except ValueError:
            value = 0
        if self.inimage:
            context = self._getContext()
            context['width'] = value
    def _start_height(self, attrsD):
        self.push('height', 0)
    def _end_height(self):
        # non-numeric heights collapse to 0
        value = self.pop('height')
        try:
            value = int(value)
        except ValueError:
            value = 0
        if self.inimage:
            context = self._getContext()
            context['height'] = value
    def _start_url(self, attrsD):
        # url/homepage/uri all accumulate under 'href'
        self.push('href', 1)
    _start_homepage = _start_url
    _start_uri = _start_url
    def _end_url(self):
        value = self.pop('href')
        if self.inauthor:
            self._save_author('href', value)
        elif self.incontributor:
            self._save_contributor('href', value)
    _end_homepage = _end_url
    _end_uri = _end_url
    def _start_email(self, attrsD):
        self.push('email', 0)
    _start_itunes_email = _start_email
    def _end_email(self):
        # route the email to whichever scope we are currently inside
        value = self.pop('email')
        if self.inpublisher:
            self._save_author('email', value, 'publisher')
        elif self.inauthor:
            self._save_author('email', value)
        elif self.incontributor:
            self._save_contributor('email', value)
    _end_itunes_email = _end_email
def _getContext(self):
if self.insource:
context = self.sourcedata
elif self.inimage and 'image' in self.feeddata:
context = self.feeddata['image']
elif self.intextinput:
context = self.feeddata['textinput']
elif self.inentry:
context = self.entries[-1]
else:
context = self.feeddata
return context
    def _save_author(self, key, value, prefix='author'):
        # record into <prefix>_detail and mirror onto the latest
        # authors[] entry
        context = self._getContext()
        context.setdefault(prefix + '_detail', FeedParserDict())
        context[prefix + '_detail'][key] = value
        self._sync_author_detail()
        context.setdefault('authors', [FeedParserDict()])
        context['authors'][-1][key] = value
    def _save_contributor(self, key, value):
        # record onto the latest contributors[] entry
        context = self._getContext()
        context.setdefault('contributors', [FeedParserDict()])
        context['contributors'][-1][key] = value
def _sync_author_detail(self, key='author'):
context = self._getContext()
detail = context.get('%s_detail' % key)
if detail:
name = detail.get('name')
email = detail.get('email')
if name and email:
context[key] = u'%s (%s)' % (name, email)
elif name:
context[key] = name
elif email:
context[key] = email
else:
author, email = context.get(key), None
if not author:
return
emailmatch = re.search(ur'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
if emailmatch:
email = emailmatch.group(0)
# probably a better way to do the following, but it passes all the tests
author = author.replace(email, u'')
author = author.replace(u'()', u'')
author = author.replace(u'<>', u'')
author = author.replace(u'<>', u'')
author = author.strip()
if author and (author[0] == u'('):
author = author[1:]
if author and (author[-1] == u')'):
author = author[:-1]
author = author.strip()
if author or email:
context.setdefault('%s_detail' % key, FeedParserDict())
if author:
context['%s_detail' % key]['name'] = author
if email:
context['%s_detail' % key]['email'] = email
    def _start_subtitle(self, attrsD):
        # subtitle/tagline are plain-text content elements
        self.pushContent('subtitle', attrsD, u'text/plain', 1)
    _start_tagline = _start_subtitle
    _start_itunes_subtitle = _start_subtitle
    def _end_subtitle(self):
        self.popContent('subtitle')
    _end_tagline = _end_subtitle
    _end_itunes_subtitle = _end_subtitle
    def _start_rights(self, attrsD):
        # rights/copyright are plain-text content elements
        self.pushContent('rights', attrsD, u'text/plain', 1)
    _start_dc_rights = _start_rights
    _start_copyright = _start_rights
    def _end_rights(self):
        self.popContent('rights')
    _end_dc_rights = _end_rights
    _end_copyright = _end_rights
def _start_item(self, attrsD):
self.entries.append(FeedParserDict())
self.push('item', 0)
self.inentry = 1
self.guidislink = 0
self.title_depth = -1
self.psc_chapters_flag = None
id = self._getAttribute(attrsD, 'rdf:about')
if id:
context = self._getContext()
context['id'] = id
self._cdf_common(attrsD)
_start_entry = _start_item
def _end_item(self):
self.pop('item')
self.inentry = 0
_end_entry = _end_item
    def _start_dc_language(self, attrsD):
        self.push('language', 1)
    _start_language = _start_dc_language
    def _end_dc_language(self):
        # the declared language also becomes the inherited default
        self.lang = self.pop('language')
    _end_language = _end_dc_language
    def _start_dc_publisher(self, attrsD):
        self.push('publisher', 1)
    _start_webmaster = _start_dc_publisher
    def _end_dc_publisher(self):
        self.pop('publisher')
        self._sync_author_detail('publisher')
    _end_webmaster = _end_dc_publisher
    def _start_published(self, attrsD):
        self.push('published', 1)
    _start_dcterms_issued = _start_published
    _start_issued = _start_published
    _start_pubdate = _start_published
    def _end_published(self):
        # store the raw date string plus its parsed form
        value = self.pop('published')
        self._save('published_parsed', _parse_date(value), overwrite=True)
    _end_dcterms_issued = _end_published
    _end_issued = _end_published
    _end_pubdate = _end_published
    def _start_updated(self, attrsD):
        self.push('updated', 1)
    _start_modified = _start_updated
    _start_dcterms_modified = _start_updated
    _start_dc_date = _start_updated
    _start_lastbuilddate = _start_updated
    def _end_updated(self):
        # store the raw date string plus its parsed form
        value = self.pop('updated')
        parsed_value = _parse_date(value)
        self._save('updated_parsed', parsed_value, overwrite=True)
    _end_modified = _end_updated
    _end_dcterms_modified = _end_updated
    _end_dc_date = _end_updated
    _end_lastbuilddate = _end_updated
    def _start_created(self, attrsD):
        self.push('created', 1)
    _start_dcterms_created = _start_created
    def _end_created(self):
        value = self.pop('created')
        self._save('created_parsed', _parse_date(value), overwrite=True)
    _end_dcterms_created = _end_created
    def _start_expirationdate(self, attrsD):
        self.push('expired', 1)
    def _end_expirationdate(self):
        self._save('expired_parsed', _parse_date(self.pop('expired')), overwrite=True)
    # geospatial location, or "where", from georss.org
    def _start_georssgeom(self, attrsD):
        # shared opener for the simple georss geometry elements
        self.push('geometry', 0)
        context = self._getContext()
        context['where'] = FeedParserDict()
    _start_georss_point = _start_georssgeom
    _start_georss_line = _start_georssgeom
    _start_georss_polygon = _start_georssgeom
    _start_georss_box = _start_georssgeom
    def _save_where(self, geometry):
        # merge parsed geometry data into the 'where' dict
        context = self._getContext()
        context['where'].update(geometry)
    def _end_georss_point(self):
        geometry = _parse_georss_point(self.pop('geometry'))
        if geometry:
            self._save_where(geometry)
    def _end_georss_line(self):
        geometry = _parse_georss_line(self.pop('geometry'))
        if geometry:
            self._save_where(geometry)
    def _end_georss_polygon(self):
        this = self.pop('geometry')
        geometry = _parse_georss_polygon(this)
        if geometry:
            self._save_where(geometry)
    def _end_georss_box(self):
        geometry = _parse_georss_box(self.pop('geometry'))
        if geometry:
            self._save_where(geometry)
    def _start_where(self, attrsD):
        # gml-style <where> wrapper element
        self.push('where', 0)
        context = self._getContext()
        context['where'] = FeedParserDict()
    _start_georss_where = _start_where
def _parse_srs_attrs(self, attrsD):
srsName = attrsD.get('srsname')
try:
srsDimension = int(attrsD.get('srsdimension', '2'))
except ValueError:
srsDimension = 2
context = self._getContext()
context['where']['srsName'] = srsName
context['where']['srsDimension'] = srsDimension
    def _start_gml_point(self, attrsD):
        """Open a GML <Point>: record SRS info, mark geometry in progress."""
        self._parse_srs_attrs(attrsD)
        self.ingeometry = 1
        self.push('geometry', 0)
    def _start_gml_linestring(self, attrsD):
        """Open a GML <LineString>: record SRS info, mark geometry type."""
        self._parse_srs_attrs(attrsD)
        self.ingeometry = 'linestring'
        self.push('geometry', 0)
    def _start_gml_polygon(self, attrsD):
        """Open a GML <Polygon>; geometry type is set by the inner LinearRing."""
        self._parse_srs_attrs(attrsD)
        self.push('geometry', 0)
    def _start_gml_exterior(self, attrsD):
        """Open a GML <exterior> ring container."""
        self.push('geometry', 0)
    def _start_gml_linearring(self, attrsD):
        """Open a GML <LinearRing>: its coordinates form a polygon."""
        self.ingeometry = 'polygon'
        self.push('geometry', 0)
    def _start_gml_pos(self, attrsD):
        """Begin collecting a GML <pos> coordinate pair."""
        self.push('pos', 0)
def _end_gml_pos(self):
this = self.pop('pos')
context = self._getContext()
srsName = context['where'].get('srsName')
srsDimension = context['where'].get('srsDimension', 2)
swap = True
if srsName and "EPSG" in srsName:
epsg = int(srsName.split(":")[-1])
swap = bool(epsg in _geogCS)
geometry = _parse_georss_point(this, swap=swap, dims=srsDimension)
if geometry:
self._save_where(geometry)
    def _start_gml_poslist(self, attrsD):
        """Begin collecting a GML <posList> coordinate list."""
        self.push('pos', 0)
def _end_gml_poslist(self):
this = self.pop('pos')
context = self._getContext()
srsName = context['where'].get('srsName')
srsDimension = context['where'].get('srsDimension', 2)
swap = True
if srsName and "EPSG" in srsName:
epsg = int(srsName.split(":")[-1])
swap = bool(epsg in _geogCS)
geometry = _parse_poslist(
this, self.ingeometry, swap=swap, dims=srsDimension)
if geometry:
self._save_where(geometry)
    def _end_geom(self):
        """Close a GML geometry element and clear the in-geometry marker."""
        self.ingeometry = 0
        self.pop('geometry')
    _end_gml_point = _end_geom
    _end_gml_linestring = _end_geom
    _end_gml_linearring = _end_geom
    _end_gml_exterior = _end_geom
    _end_gml_polygon = _end_geom
    def _end_where(self):
        """Close a GeoRSS <where> container."""
        self.pop('where')
    _end_georss_where = _end_where
    # end geospatial
def _start_cc_license(self, attrsD):
context = self._getContext()
value = self._getAttribute(attrsD, 'rdf:resource')
attrsD = FeedParserDict()
attrsD['rel'] = u'license'
if value:
attrsD['href']=value
context.setdefault('links', []).append(attrsD)
    def _start_creativecommons_license(self, attrsD):
        """Begin collecting a creativeCommons:license element's text."""
        self.push('license', 1)
    _start_creativeCommons_license = _start_creativecommons_license
def _end_creativecommons_license(self):
value = self.pop('license')
context = self._getContext()
attrsD = FeedParserDict()
attrsD['rel'] = u'license'
if value:
attrsD['href'] = value
context.setdefault('links', []).append(attrsD)
del context['license']
_end_creativeCommons_license = _end_creativecommons_license
def _addTag(self, term, scheme, label):
context = self._getContext()
tags = context.setdefault('tags', [])
if (not term) and (not scheme) and (not label):
return
value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
if value not in tags:
tags.append(value)
def _start_category(self, attrsD):
term = attrsD.get('term')
scheme = attrsD.get('scheme', attrsD.get('domain'))
label = attrsD.get('label')
self._addTag(term, scheme, label)
self.push('category', 1)
_start_dc_subject = _start_category
_start_keywords = _start_category
    def _start_media_category(self, attrsD):
        """Handle media:category like a category with the MRSS default scheme."""
        attrsD.setdefault('scheme', u'http://search.yahoo.com/mrss/category_schema')
        self._start_category(attrsD)
def _end_itunes_keywords(self):
for term in self.pop('itunes_keywords').split(','):
if term.strip():
self._addTag(term.strip(), u'http://www.itunes.com/', None)
    def _start_itunes_category(self, attrsD):
        """Record an itunes:category's text attribute as a tag."""
        self._addTag(attrsD.get('text'), u'http://www.itunes.com/', None)
        self.push('category', 1)
def _end_category(self):
value = self.pop('category')
if not value:
return
context = self._getContext()
tags = context['tags']
if value and len(tags) and not tags[-1]['term']:
tags[-1]['term'] = value
else:
self._addTag(value, None, None)
_end_dc_subject = _end_category
_end_keywords = _end_category
_end_itunes_category = _end_category
_end_media_category = _end_category
    def _start_cloud(self, attrsD):
        """Store the RSS <cloud> attributes on the current context."""
        self._getContext()['cloud'] = FeedParserDict(attrsD)
    def _start_link(self, attrsD):
        """Record a link element; collect text content only when no href
        attribute is present."""
        attrsD.setdefault('rel', u'alternate')
        if attrsD['rel'] == u'self':
            attrsD.setdefault('type', u'application/atom+xml')
        else:
            attrsD.setdefault('type', u'text/html')
        context = self._getContext()
        attrsD = self._itsAnHrefDamnIt(attrsD)
        if 'href' in attrsD:
            attrsD['href'] = self.resolveURI(attrsD['href'])
        expectingText = self.infeed or self.inentry or self.insource
        context.setdefault('links', [])
        # links inside an entry's image element don't belong to the entry
        if not (self.inentry and self.inimage):
            context['links'].append(FeedParserDict(attrsD))
        if 'href' in attrsD:
            expectingText = 0
            # an html alternate link doubles as the context's main 'link'
            if (attrsD.get('rel') == u'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
                context['link'] = attrsD['href']
        else:
            self.push('link', expectingText)
def _end_link(self):
value = self.pop('link')
    def _start_guid(self, attrsD):
        """Begin collecting a guid/id; note whether it claims to be a permalink."""
        self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
        self.push('id', 1)
    _start_id = _start_guid
def _end_guid(self):
value = self.pop('id')
self._save('guidislink', self.guidislink and 'link' not in self._getContext())
if self.guidislink:
# guid acts as link, but only if 'ispermalink' is not present or is 'true',
# and only if the item doesn't already have a link element
self._save('link', value)
_end_id = _end_guid
    def _start_title(self, attrsD):
        """Begin collecting a title; inside inline SVG, <title> is plain markup."""
        if self.svgOK:
            return self.unknown_starttag('title', attrsD.items())
        self.pushContent('title', attrsD, u'text/plain', self.infeed or self.inentry or self.insource)
    _start_dc_title = _start_title
    _start_media_title = _start_title
    def _end_title(self):
        """Close a title and remember the element depth it was found at."""
        if self.svgOK:
            return
        value = self.popContent('title')
        if not value:
            return
        self.title_depth = self.depth
    _end_dc_title = _end_title
def _end_media_title(self):
title_depth = self.title_depth
self._end_title()
self.title_depth = title_depth
    def _start_description(self, attrsD):
        """Open a description; if a summary already exists, treat it as content."""
        context = self._getContext()
        if 'summary' in context:
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self.pushContent('description', attrsD, u'text/html', self.infeed or self.inentry or self.insource)
    _start_dc_description = _start_description
    _start_media_description = _start_description
    def _start_abstract(self, attrsD):
        """Open an abstract as a plain-text description."""
        self.pushContent('description', attrsD, u'text/plain', self.infeed or self.inentry or self.insource)
def _end_description(self):
if self._summaryKey == 'content':
self._end_content()
else:
value = self.popContent('description')
self._summaryKey = None
_end_abstract = _end_description
_end_dc_description = _end_description
_end_media_description = _end_description
    def _start_info(self, attrsD):
        """Begin collecting an Atom 0.3 <info> element as plain text."""
        self.pushContent('info', attrsD, u'text/plain', 1)
    _start_feedburner_browserfriendly = _start_info
    def _end_info(self):
        """Close an <info> element."""
        self.popContent('info')
    _end_feedburner_browserfriendly = _end_info
def _start_generator(self, attrsD):
if attrsD:
attrsD = self._itsAnHrefDamnIt(attrsD)
if 'href' in attrsD:
attrsD['href'] = self.resolveURI(attrsD['href'])
self._getContext()['generator_detail'] = FeedParserDict(attrsD)
self.push('generator', 1)
def _end_generator(self):
value = self.pop('generator')
context = self._getContext()
if 'generator_detail' in context:
context['generator_detail']['name'] = value
    def _start_admin_generatoragent(self, attrsD):
        """Record admin:generatorAgent from its rdf:resource attribute."""
        self.push('generator', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            # inject the URI as element text so pop() processes it normally
            self.elementstack[-1][2].append(value)
        self.pop('generator')
        self._getContext()['generator_detail'] = FeedParserDict({'href': value})
    def _start_admin_errorreportsto(self, attrsD):
        """Record admin:errorReportsTo from its rdf:resource attribute."""
        self.push('errorreportsto', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            # inject the URI as element text so pop() processes it normally
            self.elementstack[-1][2].append(value)
        self.pop('errorreportsto')
    def _start_summary(self, attrsD):
        """Open a summary; a second summary is treated as content instead."""
        context = self._getContext()
        if 'summary' in context:
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self._summaryKey = 'summary'
            self.pushContent(self._summaryKey, attrsD, u'text/plain', 1)
    _start_itunes_summary = _start_summary
    def _end_summary(self):
        """Close a summary; if it was rerouted to content, close that instead."""
        if self._summaryKey == 'content':
            self._end_content()
        else:
            self.popContent(self._summaryKey or 'summary')
        self._summaryKey = None
    _end_itunes_summary = _end_summary
def _start_enclosure(self, attrsD):
attrsD = self._itsAnHrefDamnIt(attrsD)
context = self._getContext()
attrsD['rel'] = u'enclosure'
context.setdefault('links', []).append(FeedParserDict(attrsD))
    def _start_source(self, attrsD):
        """Open a <source> element and begin collecting source metadata."""
        if 'url' in attrsD:
            # This means that we're processing a source element from an RSS 2.0 feed
            self.sourcedata['href'] = attrsD[u'url']
        self.push('source', 1)
        self.insource = 1
        # reset so a source title doesn't collide with the entry title
        self.title_depth = -1
def _end_source(self):
self.insource = 0
value = self.pop('source')
if value:
self.sourcedata['title'] = value
self._getContext()['source'] = copy.deepcopy(self.sourcedata)
self.sourcedata.clear()
    def _start_content(self, attrsD):
        """Begin collecting entry content; remember an out-of-line src if given."""
        self.pushContent('content', attrsD, u'text/plain', 1)
        src = attrsD.get('src')
        if src:
            self.contentparams['src'] = src
        self.push('content', 1)
    def _start_body(self, attrsD):
        """Begin collecting an xhtml:body element as XHTML content."""
        self.pushContent('content', attrsD, u'application/xhtml+xml', 1)
    _start_xhtml_body = _start_body
    def _start_content_encoded(self, attrsD):
        """Begin collecting content:encoded as HTML content."""
        self.pushContent('content', attrsD, u'text/html', 1)
    _start_fullitem = _start_content_encoded
def _end_content(self):
copyToSummary = self.mapContentType(self.contentparams.get('type')) in ([u'text/plain'] + self.html_types)
value = self.popContent('content')
if copyToSummary:
self._save('summary', value)
_end_body = _end_content
_end_xhtml_body = _end_content
_end_content_encoded = _end_content
_end_fullitem = _end_content
def _start_itunes_image(self, attrsD):
self.push('itunes_image', 0)
if attrsD.get('href'):
self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
elif attrsD.get('url'):
self._getContext()['image'] = FeedParserDict({'href': attrsD.get('url')})
_start_itunes_link = _start_itunes_image
def _end_itunes_block(self):
value = self.pop('itunes_block', 0)
self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
    def _end_itunes_explicit(self):
        """Map itunes:explicit text onto True ('yes'), False ('clean'), or None."""
        value = self.pop('itunes_explicit', 0)
        # Convert 'yes' -> True, 'clean' to False, and any other value to None
        # False and None both evaluate as False, so the difference can be ignored
        # by applications that only need to know if the content is explicit.
        self._getContext()['itunes_explicit'] = (None, False, True)[(value == 'yes' and 2) or value == 'clean' or 0]
    def _start_media_group(self, attrsD):
        # don't do anything, but don't break the enclosed tags either
        pass
def _start_media_credit(self, attrsD):
context = self._getContext()
context.setdefault('media_credit', [])
context['media_credit'].append(attrsD)
self.push('credit', 1)
def _end_media_credit(self):
credit = self.pop('credit')
if credit != None and len(credit.strip()) != 0:
context = self._getContext()
context['media_credit'][-1]['content'] = credit
    def _start_media_restriction(self, attrsD):
        """Record media:restriction attributes and begin collecting its text."""
        context = self._getContext()
        context.setdefault('media_restriction', attrsD)
        self.push('restriction', 1)
def _end_media_restriction(self):
restriction = self.pop('restriction')
if restriction != None and len(restriction.strip()) != 0:
context = self._getContext()
context['media_restriction']['content'] = restriction
    def _start_media_license(self, attrsD):
        """Record media:license attributes and begin collecting its text."""
        context = self._getContext()
        context.setdefault('media_license', attrsD)
        self.push('license', 1)
def _end_media_license(self):
license = self.pop('license')
if license != None and len(license.strip()) != 0:
context = self._getContext()
context['media_license']['content'] = license
def _start_media_content(self, attrsD):
context = self._getContext()
context.setdefault('media_content', [])
context['media_content'].append(attrsD)
    def _start_media_thumbnail(self, attrsD):
        """Append a media:thumbnail entry; its text may supply a fallback url."""
        context = self._getContext()
        context.setdefault('media_thumbnail', [])
        self.push('url', 1) # new
        context['media_thumbnail'].append(attrsD)
def _end_media_thumbnail(self):
url = self.pop('url')
context = self._getContext()
if url != None and len(url.strip()) != 0:
if 'url' not in context['media_thumbnail'][-1]:
context['media_thumbnail'][-1]['url'] = url
    def _start_media_player(self, attrsD):
        """Record media:player attributes and begin collecting its text."""
        self.push('media_player', 0)
        self._getContext()['media_player'] = FeedParserDict(attrsD)
def _end_media_player(self):
value = self.pop('media_player')
context = self._getContext()
context['media_player']['content'] = value
    def _start_newlocation(self, attrsD):
        """Begin collecting an RSS <newLocation> element's text."""
        self.push('newlocation', 1)
def _end_newlocation(self):
url = self.pop('newlocation')
context = self._getContext()
# don't set newlocation if the context isn't right
if context is not self.feeddata:
return
context['newlocation'] = _makeSafeAbsoluteURI(self.baseuri, url.strip())
    def _start_psc_chapters(self, attrsD):
        """Start a psc:chapters block; only the first one per context is kept."""
        if self.psc_chapters_flag is None:
            # Transition from None -> True
            self.psc_chapters_flag = True
            attrsD['chapters'] = []
            self._getContext()['psc_chapters'] = FeedParserDict(attrsD)
    def _end_psc_chapters(self):
        """End a psc:chapters block; later chapter elements are ignored."""
        # Transition from True -> False
        self.psc_chapters_flag = False
def _start_psc_chapter(self, attrsD):
if self.psc_chapters_flag:
start = self._getAttribute(attrsD, 'start')
attrsD['start_parsed'] = _parse_psc_chapter_start(start)
context = self._getContext()['psc_chapters']
context['chapters'].append(FeedParserDict(attrsD))
if _XML_AVAILABLE:
    class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
        """SAX ContentHandler that routes parse events into _FeedParserMixin.

        Fixes applied: the deprecated ``<>`` operator is now ``!=``, the
        Python-2-only comma form of ``raise`` is now the call form, and
        ``== None`` is now ``is None`` — all behavior-preserving and valid
        on Python 2.
        """
        def __init__(self, baseuri, baselang, encoding):
            xml.sax.handler.ContentHandler.__init__(self)
            _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
            self.bozo = 0     # set to 1 when the document is not well-formed
            self.exc = None   # the exception that set bozo, if any
            self.decls = {}   # xmlns declarations to replay on the next element
        def startPrefixMapping(self, prefix, uri):
            if not uri:
                return
            # Jython uses '' instead of None; standardize on None
            prefix = prefix or None
            self.trackNamespace(prefix, uri)
            if prefix and uri == 'http://www.w3.org/1999/xlink':
                self.decls['xmlns:' + prefix] = uri
        def startElementNS(self, name, qname, attrs):
            namespace, localname = name
            lowernamespace = str(namespace or '').lower()
            if lowernamespace.find(u'backend.userland.com/rss') != -1:
                # match any backend.userland.com namespace
                namespace = u'http://backend.userland.com/rss'
                lowernamespace = namespace
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = None
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if givenprefix and (prefix is None or (prefix == '' and lowernamespace == '')) and givenprefix not in self.namespacesInUse:
                raise UndeclaredNamespace("'%s' is not associated with a namespace" % givenprefix)
            localname = str(localname).lower()
            # qname implementation is horribly broken in Python 2.1 (it
            # doesn't report any), and slightly broken in Python 2.2 (it
            # doesn't report the xml: namespace). So we match up namespaces
            # with a known list first, and then possibly override them with
            # the qnames the SAX parser gives us (if indeed it gives us any
            # at all). Thanks to MatejC for helping me test this and
            # tirelessly telling me that it didn't work yet.
            attrsD, self.decls = self.decls, {}
            if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
                attrsD['xmlns']=namespace
            if localname=='svg' and namespace=='http://www.w3.org/2000/svg':
                attrsD['xmlns']=namespace
            if prefix:
                localname = prefix.lower() + ':' + localname
            elif namespace and not qname: #Expat
                for name,value in self.namespacesInUse.items():
                    if name and value == namespace:
                        localname = name + ':' + localname
                        break
            for (namespace, attrlocalname), attrvalue in attrs.items():
                lowernamespace = (namespace or '').lower()
                prefix = self._matchnamespaces.get(lowernamespace, '')
                if prefix:
                    attrlocalname = prefix + ':' + attrlocalname
                attrsD[str(attrlocalname).lower()] = attrvalue
            for qname in attrs.getQNames():
                attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
            localname = str(localname).lower()
            self.unknown_starttag(localname, attrsD.items())
        def characters(self, text):
            self.handle_data(text)
        def endElementNS(self, name, qname):
            namespace, localname = name
            lowernamespace = str(namespace or '').lower()
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = ''
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if prefix:
                localname = prefix + ':' + localname
            elif namespace and not qname: #Expat
                for name,value in self.namespacesInUse.items():
                    if name and value == namespace:
                        localname = name + ':' + localname
                        break
            localname = str(localname).lower()
            self.unknown_endtag(localname)
        def error(self, exc):
            # record the problem but keep going; callers check self.bozo
            self.bozo = 1
            self.exc = exc
        # drv_libxml2 calls warning() in some cases
        warning = error
        def fatalError(self, exc):
            self.error(exc)
            raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
    """SGML parser that regenerates the HTML it parses.

    Subclasses override the handler methods to filter or rewrite markup;
    the processed document accumulates in ``self.pieces`` and is returned
    by :meth:`output`.

    Fix applied: several entity-escape string literals had been corrupted
    (HTML-unescaped into no-op replacements / broken syntax); they are
    restored here per upstream feedparser: ``feed()``'s ``&lt;!`` escape
    and ``&#39;``/``&#34;`` normalization, ``unknown_starttag()``'s
    attribute-value escaping, ``handle_entityref()``'s ``&amp;`` output,
    and ``parse_declaration()``'s ``&lt;`` fallback.
    """
    special = re.compile('''[<>'"]''')
    # an '&' that does not start a valid character or entity reference
    bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
    elements_no_end_tag = set([
      'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame',
      'hr', 'img', 'input', 'isindex', 'keygen', 'link', 'meta', 'param',
      'source', 'track', 'wbr'
    ])
    def __init__(self, encoding, _type):
        self.encoding = encoding
        self._type = _type
        sgmllib.SGMLParser.__init__(self)
    def reset(self):
        self.pieces = []
        sgmllib.SGMLParser.reset(self)
    def _shorttag_replace(self, match):
        """Expand <tag/> to <tag /> or <tag></tag> depending on the tag."""
        tag = match.group(1)
        if tag in self.elements_no_end_tag:
            return '<' + tag + ' />'
        else:
            return '<' + tag + '></' + tag + '>'
    # By declaring these methods and overriding their compiled code
    # with the code from sgmllib, the original code will execute in
    # feedparser's scope instead of sgmllib's. This means that the
    # `tagfind` and `charref` regular expressions will be found as
    # they're declared above, not as they're declared in sgmllib.
    def goahead(self, i):
        pass
    goahead.func_code = sgmllib.SGMLParser.goahead.func_code
    def __parse_starttag(self, i):
        pass
    __parse_starttag.func_code = sgmllib.SGMLParser.parse_starttag.func_code
    def parse_starttag(self,i):
        j = self.__parse_starttag(i)
        if self._type == 'application/xhtml+xml':
            if j>2 and self.rawdata[j-2:j]=='/>':
                self.unknown_endtag(self.lasttag)
        return j
    def feed(self, data):
        """Normalize raw markup, then feed it to the SGML parser."""
        # escape '<!' that doesn't begin a doctype, comment, or marked section
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
        data = re.sub(r'<([^<>\s]+?)\s*/>', self._shorttag_replace, data)
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
        try:
            bytes
            if bytes is str:
                raise NameError
            self.encoding = self.encoding + u'_INVALID_PYTHON_3'
        except NameError:
            if self.encoding and isinstance(data, unicode):
                data = data.encode(self.encoding)
        sgmllib.SGMLParser.feed(self, data)
        sgmllib.SGMLParser.close(self)
    def normalize_attrs(self, attrs):
        """Lower-case attribute names (and rel/type values); de-dupe and sort."""
        if not attrs:
            return attrs
        # utility method to be called by descendants
        attrs = dict([(k.lower(), v) for k, v in attrs]).items()
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
        attrs.sort()
        return attrs
    def unknown_starttag(self, tag, attrs):
        # called for each start tag
        # attrs is a list of (attr, value) tuples
        # e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
        uattrs = []
        strattrs=''
        if attrs:
            for key, value in attrs:
                value=value.replace('>','&gt;').replace('<','&lt;').replace('"','&quot;')
                value = self.bare_ampersand.sub("&amp;", value)
                # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
                if not isinstance(value, unicode):
                    value = value.decode(self.encoding, 'ignore')
                try:
                    # Currently, in Python 3 the key is already a str, and cannot be decoded again
                    uattrs.append((unicode(key, self.encoding), value))
                except TypeError:
                    uattrs.append((key, value))
            strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs])
            if self.encoding:
                try:
                    strattrs = strattrs.encode(self.encoding)
                except (UnicodeEncodeError, LookupError):
                    pass
        if tag in self.elements_no_end_tag:
            self.pieces.append('<%s%s />' % (tag, strattrs))
        else:
            self.pieces.append('<%s%s>' % (tag, strattrs))
    def unknown_endtag(self, tag):
        # called for each end tag, e.g. for </pre>, tag will be 'pre'
        # Reconstruct the original end tag.
        if tag not in self.elements_no_end_tag:
            self.pieces.append("</%s>" % tag)
    def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        # Reconstruct the original character reference.
        ref = ref.lower()
        if ref.startswith('x'):
            value = int(ref[1:], 16)
        else:
            value = int(ref)
        if value in _cp1252:
            self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:])
        else:
            self.pieces.append('&#%s;' % ref)
    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        # Reconstruct the original entity reference.
        if ref in name2codepoint or ref == 'apos':
            self.pieces.append('&%s;' % ref)
        else:
            # unknown entity: escape the bare ampersand
            self.pieces.append('&amp;%s' % ref)
    def handle_data(self, text):
        # called for each block of plain text, i.e. outside of any tag and
        # not containing any character or entity references
        # Store the original text verbatim.
        self.pieces.append(text)
    def handle_comment(self, text):
        # called for each HTML comment, e.g. <!-- insert Javascript code here -->
        # Reconstruct the original comment.
        self.pieces.append('<!--%s-->' % text)
    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        # Reconstruct original processing instruction.
        self.pieces.append('<?%s>' % text)
    def handle_decl(self, text):
        # called for the DOCTYPE, if present, e.g.
        # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
        #     "http://www.w3.org/TR/html4/loose.dtd">
        # Reconstruct original DOCTYPE
        self.pieces.append('<!%s>' % text)
    _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
    def _scan_name(self, i, declstartpos):
        """Scan for a declaration name starting at position i."""
        rawdata = self.rawdata
        n = len(rawdata)
        if i == n:
            return None, -1
        m = self._new_declname_match(rawdata, i)
        if m:
            s = m.group()
            name = s.strip()
            if (i + len(s)) == n:
                return None, -1 # end of buffer
            return name.lower(), m.end()
        else:
            self.handle_data(rawdata)
#            self.updatepos(declstartpos, i)
            return None, -1
    def convert_charref(self, name):
        return '&#%s;' % name
    def convert_entityref(self, name):
        return '&%s;' % name
    def output(self):
        '''Return processed HTML as a single string'''
        return ''.join([str(p) for p in self.pieces])
    def parse_declaration(self, i):
        try:
            return sgmllib.SGMLParser.parse_declaration(self, i)
        except sgmllib.SGMLParseError:
            # escape the doctype declaration and continue parsing
            self.handle_data('&lt;')
            return i+1
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
    """Feed parser built on the loose SGML processor instead of strict SAX.

    Fix applied: the entity string literals in decodeEntities/strattrs had
    been corrupted (HTML-unescaped into no-op replacements); restored per
    upstream feedparser so numeric character references are normalized to
    named entities, and fully decoded for non-XML content types.
    """
    def __init__(self, baseuri, baselang, encoding, entities):
        sgmllib.SGMLParser.__init__(self)
        _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
        _BaseHTMLProcessor.__init__(self, encoding, 'application/xhtml+xml')
        self.entities = entities
    def decodeEntities(self, element, data):
        # normalize numeric character references to named entities
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3c;', '&lt;')
        data = data.replace('&#x3C;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3e;', '&gt;')
        data = data.replace('&#x3E;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        # non-XML content types get fully decoded text
        if not self.contentparams.get('type', u'xml').endswith(u'xml'):
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
        return data
    def strattrs(self, attrs):
        """Serialize (name, value) attribute pairs, escaping double quotes."""
        return ''.join([' %s="%s"' % (n, v.replace('"', '&quot;')) for n, v in attrs])
class _RelativeURIResolver(_BaseHTMLProcessor):
    """HTML processor that rewrites known relative-URI attributes to be
    absolute (and scheme-safe) against a base URI."""
    # (tag, attribute) pairs whose values are URIs that may be relative
    relative_uris = set([('a', 'href'),
                     ('applet', 'codebase'),
                     ('area', 'href'),
                     ('blockquote', 'cite'),
                     ('body', 'background'),
                     ('del', 'cite'),
                     ('form', 'action'),
                     ('frame', 'longdesc'),
                     ('frame', 'src'),
                     ('iframe', 'longdesc'),
                     ('iframe', 'src'),
                     ('head', 'profile'),
                     ('img', 'longdesc'),
                     ('img', 'src'),
                     ('img', 'usemap'),
                     ('input', 'src'),
                     ('input', 'usemap'),
                     ('ins', 'cite'),
                     ('link', 'href'),
                     ('object', 'classid'),
                     ('object', 'codebase'),
                     ('object', 'data'),
                     ('object', 'usemap'),
                     ('q', 'cite'),
                     ('script', 'src'),
                     ('video', 'poster')])
    def __init__(self, baseuri, encoding, _type):
        _BaseHTMLProcessor.__init__(self, encoding, _type)
        self.baseuri = baseuri
    def resolveURI(self, uri):
        """Resolve *uri* against the base URI, filtering unsafe schemes."""
        return _makeSafeAbsoluteURI(self.baseuri, uri.strip())
    def unknown_starttag(self, tag, attrs):
        """Rewrite URI-bearing attributes before emitting the tag."""
        rewritten = []
        for name, value in self.normalize_attrs(attrs):
            if (tag, name) in self.relative_uris:
                # keep the original value if resolution yields an empty string
                value = self.resolveURI(value) or value
            rewritten.append((name, value))
        _BaseHTMLProcessor.unknown_starttag(self, tag, rewritten)
def _resolveRelativeURIs(htmlSource, baseURI, encoding, _type):
    """Return *htmlSource* with relative URIs resolved against *baseURI*.

    Returns the input unchanged when no SGML parser is available.
    """
    if not _SGML_AVAILABLE:
        return htmlSource
    resolver = _RelativeURIResolver(baseURI, encoding, _type)
    resolver.feed(htmlSource)
    return resolver.output()
def _makeSafeAbsoluteURI(base, rel=None):
    """Join *base* and *rel* into an absolute URI, returning u'' when the
    resulting scheme is not in ACCEPTABLE_URI_SCHEMES."""
    # with no scheme whitelist configured, perform a plain join
    if not ACCEPTABLE_URI_SCHEMES:
        return _urljoin(base, rel or u'')
    if not base:
        return rel or u''
    if not rel:
        # only the base: accept it if its scheme (or lack of one) is allowed
        try:
            scheme = urlparse.urlparse(base)[0]
        except ValueError:
            return u''
        if scheme and scheme not in ACCEPTABLE_URI_SCHEMES:
            return u''
        return base
    joined = _urljoin(base, rel)
    if joined.strip().split(':', 1)[0] not in ACCEPTABLE_URI_SCHEMES:
        return u''
    return joined
class _HTMLSanitizer(_BaseHTMLProcessor):
    """HTML processor that strips elements, attributes, and CSS not on the
    whitelists below (including inline MathML/SVG handling)."""
    # HTML elements allowed through the sanitizer
    acceptable_elements = set(['a', 'abbr', 'acronym', 'address', 'area',
        'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
        'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
        'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
        'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
        'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
        'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
        'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
        'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
        'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
        'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
        'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
        'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript',
        'object', 'embed', 'iframe', 'param'])
    # HTML attributes allowed through the sanitizer
    acceptable_attributes = set(['abbr', 'accept', 'accept-charset', 'accesskey',
      'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
      'background', 'balance', 'bgcolor', 'bgproperties', 'border',
      'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
      'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
      'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'cols',
      'colspan', 'compact', 'contenteditable', 'controls', 'coords', 'data',
      'datafld', 'datapagesize', 'datasrc', 'datetime', 'default', 'delay',
      'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for',
      'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus',
      'hidden', 'high', 'href', 'hreflang', 'hspace', 'icon', 'id', 'inputmode',
      'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc',
      'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max',
      'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref',
      'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size',
      'poster', 'pqg', 'preload', 'prompt', 'radiogroup', 'readonly', 'rel',
      'repeat-max', 'repeat-min', 'replace', 'required', 'rev', 'rightspacing',
      'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span',
      'src', 'start', 'step', 'summary', 'suppress', 'tabindex', 'target',
      'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
      'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
      'width', 'wrap', 'xml:lang'])
    # elements whose entire content is dropped, not just the tags
    unacceptable_elements_with_end_tag = set(['script', 'applet', 'style'])
    # CSS properties allowed in style attributes
    acceptable_css_properties = set(['azimuth', 'background-color',
      'border-bottom-color', 'border-collapse', 'border-color',
      'border-left-color', 'border-right-color', 'border-top-color', 'clear',
      'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
      'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
      'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
      'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
      'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
      'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
      'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
      'white-space', 'width'])
    # survey of common keywords found in feeds
    acceptable_css_keywords = set(['auto', 'aqua', 'black', 'block', 'blue',
      'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
      'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
      'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
      'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
      'transparent', 'underline', 'white', 'yellow'])
    # colors and dimension-like CSS values (e.g. #fff, rgb(...), 1.5em)
    valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' +
      '\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')
    # MathML elements allowed through the sanitizer
    mathml_elements = set(['annotation', 'annotation-xml', 'maction', 'math',
      'merror', 'mfenced', 'mfrac', 'mi', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded',
      'mphantom', 'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle',
      'msub', 'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
      'munderover', 'none', 'semantics'])
    # MathML attributes allowed through the sanitizer
    mathml_attributes = set(['actiontype', 'align', 'columnalign', 'columnalign',
      'columnalign', 'close', 'columnlines', 'columnspacing', 'columnspan', 'depth',
      'display', 'displaystyle', 'encoding', 'equalcolumns', 'equalrows',
      'fence', 'fontstyle', 'fontweight', 'frame', 'height', 'linethickness',
      'lspace', 'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant',
      'maxsize', 'minsize', 'open', 'other', 'rowalign', 'rowalign', 'rowalign',
      'rowlines', 'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
      'separator', 'separators', 'stretchy', 'width', 'width', 'xlink:href',
      'xlink:show', 'xlink:type', 'xmlns', 'xmlns:xlink'])
    # svgtiny - foreignObject + linearGradient + radialGradient + stop
    svg_elements = set(['a', 'animate', 'animateColor', 'animateMotion',
      'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'foreignObject',
      'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
      'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath',
      'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop',
      'svg', 'switch', 'text', 'title', 'tspan', 'use'])
    # svgtiny + class + opacity + offset + xmlns + xmlns:xlink
    svg_attributes = set(['accent-height', 'accumulate', 'additive', 'alphabetic',
       'arabic-form', 'ascent', 'attributeName', 'attributeType',
       'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
       'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx',
       'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-opacity',
       'fill-rule', 'font-family', 'font-size', 'font-stretch', 'font-style',
       'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2',
       'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x',
       'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', 'keySplines',
       'keyTimes', 'lang', 'mathematical', 'marker-end', 'marker-mid',
       'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', 'max',
       'min', 'name', 'offset', 'opacity', 'orient', 'origin',
       'overline-position', 'overline-thickness', 'panose-1', 'path',
       'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', 'refY',
       'repeatCount', 'repeatDur', 'requiredExtensions', 'requiredFeatures',
       'restart', 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv',
       'stop-color', 'stop-opacity', 'strikethrough-position',
       'strikethrough-thickness', 'stroke', 'stroke-dasharray',
       'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin',
       'stroke-miterlimit', 'stroke-opacity', 'stroke-width', 'systemLanguage',
       'target', 'text-anchor', 'to', 'transform', 'type', 'u1', 'u2',
       'underline-position', 'underline-thickness', 'unicode', 'unicode-range',
       'units-per-em', 'values', 'version', 'viewBox', 'visibility', 'width',
       'widths', 'x', 'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
       'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
       'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1',
       'y2', 'zoomAndPan'])
    # lazily-built lower-case -> camelCase maps for SVG names (see unknown_starttag)
    svg_attr_map = None
    svg_elem_map = None
    # CSS properties allowed inside SVG style attributes
    acceptable_svg_properties = set([ 'fill', 'fill-opacity', 'fill-rule',
      'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
      'stroke-opacity'])
def reset(self):
_BaseHTMLProcessor.reset(self)
self.unacceptablestack = 0
self.mathmlOK = 0
self.svgOK = 0
def unknown_starttag(self, tag, attrs):
acceptable_attributes = self.acceptable_attributes
keymap = {}
if not tag in self.acceptable_elements or self.svgOK:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack += 1
# add implicit namespaces to html5 inline svg/mathml
if self._type.endswith('html'):
if not dict(attrs).get('xmlns'):
if tag=='svg':
attrs.append( ('xmlns','http://www.w3.org/2000/svg') )
if tag=='math':
attrs.append( ('xmlns','http://www.w3.org/1998/Math/MathML') )
# not otherwise acceptable, perhaps it is MathML or SVG?
if tag=='math' and ('xmlns','http://www.w3.org/1998/Math/MathML') in attrs:
self.mathmlOK += 1
if tag=='svg' and ('xmlns','http://www.w3.org/2000/svg') in attrs:
self.svgOK += 1
# chose acceptable attributes based on tag class, else bail
if self.mathmlOK and tag in self.mathml_elements:
acceptable_attributes = self.mathml_attributes
elif self.svgOK and tag in self.svg_elements:
# for most vocabularies, lowercasing is a good idea. Many
# svg elements, however, are camel case
if not self.svg_attr_map:
lower=[attr.lower() for attr in self.svg_attributes]
mix=[a for a in self.svg_attributes if a not in lower]
self.svg_attributes = lower
self.svg_attr_map = dict([(a.lower(),a) for a in mix])
lower=[attr.lower() for attr in self.svg_elements]
mix=[a for a in self.svg_elements if a not in lower]
self.svg_elements = lower
self.svg_elem_map = dict([(a.lower(),a) for a in mix])
acceptable_attributes = self.svg_attributes
tag = self.svg_elem_map.get(tag,tag)
keymap = self.svg_attr_map
elif not tag in self.acceptable_elements:
return
# declare xlink namespace, if needed
if self.mathmlOK or self.svgOK:
if filter(lambda (n,v): n.startswith('xlink:'),attrs):
if not ('xmlns:xlink','http://www.w3.org/1999/xlink') in attrs:
attrs.append(('xmlns:xlink','http://www.w3.org/1999/xlink'))
clean_attrs = []
for key, value in self.normalize_attrs(attrs):
if key in acceptable_attributes:
key=keymap.get(key,key)
# make sure the uri uses an acceptable uri scheme
if key == u'href':
value = _makeSafeAbsoluteURI(value)
clean_attrs.append((key,value))
elif key=='style':
clean_value = self.sanitize_style(value)
if clean_value:
clean_attrs.append((key,clean_value))
_BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs)
def unknown_endtag(self, tag):
if not tag in self.acceptable_elements:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack -= 1
if self.mathmlOK and tag in self.mathml_elements:
if tag == 'math' and self.mathmlOK:
self.mathmlOK -= 1
elif self.svgOK and tag in self.svg_elements:
tag = self.svg_elem_map.get(tag,tag)
if tag == 'svg' and self.svgOK:
self.svgOK -= 1
else:
return
_BaseHTMLProcessor.unknown_endtag(self, tag)
    def handle_pi(self, text):
        # processing instructions are dropped entirely
        pass
    def handle_decl(self, text):
        # markup declarations (<!...>) are dropped entirely
        pass
def handle_data(self, text):
if not self.unacceptablestack:
_BaseHTMLProcessor.handle_data(self, text)
def sanitize_style(self, style):
# disallow urls
style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)
# gauntlet
if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
return ''
# This replaced a regexp that used re.match and was prone to pathological back-tracking.
if re.sub("\s*[-\w]+\s*:\s*[^:;]*;?", '', style).strip():
return ''
clean = []
for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
if not value:
continue
if prop.lower() in self.acceptable_css_properties:
clean.append(prop + ': ' + value + ';')
elif prop.split('-')[0].lower() in ['background','border','margin','padding']:
for keyword in value.split():
if not keyword in self.acceptable_css_keywords and \
not self.valid_css_values.match(keyword):
break
else:
clean.append(prop + ': ' + value + ';')
elif self.svgOK and prop.lower() in self.acceptable_svg_properties:
clean.append(prop + ': ' + value + ';')
return ' '.join(clean)
def parse_comment(self, i, report=1):
ret = _BaseHTMLProcessor.parse_comment(self, i, report)
if ret >= 0:
return ret
# if ret == -1, this may be a malicious attempt to circumvent
# sanitization, or a page-destroying unclosed comment
match = re.compile(r'--[^>]*>').search(self.rawdata, i+4)
if match:
return match.end()
# unclosed comment; deliberately fail to handle_data()
return len(self.rawdata)
def _sanitizeHTML(htmlSource, encoding, _type):
    """Run *htmlSource* through _HTMLSanitizer and return the cleaned
    markup with newlines normalized to '\\n'.  Returns the input
    unchanged when no SGML parser is available."""
    if not _SGML_AVAILABLE:
        return htmlSource
    p = _HTMLSanitizer(encoding, _type)
    # FIX: escape the leading '<' of CDATA sections so their contents are
    # parsed (and therefore sanitized) as ordinary character data.  The
    # previous line replaced '<![CDATA[' with itself — a no-op, almost
    # certainly entity-decoding damage to the original '&lt;![CDATA['.
    htmlSource = htmlSource.replace('<![CDATA[', '&lt;![CDATA[')
    p.feed(htmlSource)
    data = p.output()
    data = data.strip().replace('\r\n', '\n')
    return data
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
    # urllib2 handler used when fetching feeds: records the HTTP status
    # on the response object instead of raising, preserves status and
    # final URL across redirects, and upgrades a failed basic-auth
    # attempt to digest auth when the server demands it.
    def http_error_default(self, req, fp, code, msg, headers):
        # The default implementation just raises HTTPError.
        # Forget that.
        fp.status = code
        return fp
    def http_error_301(self, req, fp, code, msg, hdrs):
        # Let urllib2 follow the redirect, then record the redirect
        # status and the final URL on the result for callers to inspect.
        result = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp,
                                                            code, msg, hdrs)
        result.status = code
        result.newurl = result.geturl()
        return result
    # The default implementations in urllib2.HTTPRedirectHandler
    # are identical, so hardcoding a http_error_301 call above
    # won't affect anything
    http_error_300 = http_error_301
    http_error_302 = http_error_301
    http_error_303 = http_error_301
    http_error_307 = http_error_301
    def http_error_401(self, req, fp, code, msg, headers):
        # Check that
        # - the server requires digest auth, AND
        # - we already tried (unsuccessfully) with basic auth.
        # If both conditions hold, parse authentication information
        # out of the Authorization header we sent the first time
        # (for the username and password) and the WWW-Authenticate
        # header the server sent back (for the realm) and retry
        # the request with the appropriate digest auth headers instead.
        # This evil genius hack has been brought to you by Aaron Swartz.
        host = urlparse.urlparse(req.get_full_url())[1]
        if base64 is None or 'Authorization' not in req.headers \
           or 'WWW-Authenticate' not in headers:
            return self.http_error_default(req, fp, code, msg, headers)
        auth = _base64decode(req.headers['Authorization'].split(' ')[1])
        user, passw = auth.split(':')
        realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
        self.add_password(realm, host, user, passw)
        retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
        self.reset_retry_count()
        return retry
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers):
    """URL, filename, or string --> stream

    This function lets you define parsers that take any input source
    (URL, pathname to local or network file, or actual data as a string)
    and deal with it in a uniform manner.  Returned object is guaranteed
    to have all the basic stdio read methods (read, readline, readlines).
    Just .close() the object when you're done with it.

    If the etag argument is supplied, it will be used as the value of an
    If-None-Match request header.

    If the modified argument is supplied, it can be a tuple of 9 integers
    (as returned by gmtime() in the standard Python time module) or a date
    string in any format supported by feedparser. Regardless, it MUST
    be in GMT (Greenwich Mean Time). It will be reformatted into an
    RFC 1123-compliant date and used as the value of an If-Modified-Since
    request header.

    If the agent argument is supplied, it will be used as the value of a
    User-Agent request header.

    If the referrer argument is supplied, it will be used as the value of a
    Referer[sic] request header.

    If handlers is supplied, it is a list of handlers used to build a
    urllib2 opener.

    If request_headers is supplied it is a dictionary of HTTP request headers
    that will override the values generated by FeedParser.
    """
    # already a file-like object: hand it back untouched
    if hasattr(url_file_stream_or_string, 'read'):
        return url_file_stream_or_string
    if isinstance(url_file_stream_or_string, basestring) \
       and urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp', 'file', 'feed'):
        # Deal with the feed URI scheme (feed:http://... or feed://...)
        if url_file_stream_or_string.startswith('feed:http'):
            url_file_stream_or_string = url_file_stream_or_string[5:]
        elif url_file_stream_or_string.startswith('feed:'):
            url_file_stream_or_string = 'http:' + url_file_stream_or_string[5:]
        if not agent:
            agent = USER_AGENT
        # Test for inline user:password credentials for HTTP basic auth;
        # if present, strip them from the URL and carry them as a
        # base64-encoded Authorization value instead.
        auth = None
        if base64 and not url_file_stream_or_string.startswith('ftp:'):
            urltype, rest = urllib.splittype(url_file_stream_or_string)
            realhost, rest = urllib.splithost(rest)
            if realhost:
                user_passwd, realhost = urllib.splituser(realhost)
                if user_passwd:
                    url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
                    auth = base64.standard_b64encode(user_passwd).strip()
        # iri support: IDNA-encode non-ASCII hostnames
        if isinstance(url_file_stream_or_string, unicode):
            url_file_stream_or_string = _convert_to_idn(url_file_stream_or_string)
        # try to open with urllib2 (to use optional headers)
        request = _build_urllib2_request(url_file_stream_or_string, agent, etag, modified, referrer, auth, request_headers)
        opener = urllib2.build_opener(*tuple(handlers + [_FeedURLHandler()]))
        opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
        try:
            return opener.open(request)
        finally:
            opener.close() # JohnD
    # try to open with native open function (if url_file_stream_or_string is a filename)
    try:
        return open(url_file_stream_or_string, 'rb')
    except (IOError, UnicodeEncodeError, TypeError):
        # if url_file_stream_or_string is a unicode object that
        # cannot be converted to the encoding returned by
        # sys.getfilesystemencoding(), a UnicodeEncodeError
        # will be thrown
        # If url_file_stream_or_string is a string that contains NULL
        # (such as an XML document encoded in UTF-32), TypeError will
        # be thrown.
        pass
    # treat url_file_stream_or_string as string (the feed data itself)
    if isinstance(url_file_stream_or_string, unicode):
        return _StringIO(url_file_stream_or_string.encode('utf-8'))
    return _StringIO(url_file_stream_or_string)
def _convert_to_idn(url):
    """Return *url* with a non-ASCII host rewritten in IDNA (punycode)
    notation; pure-ASCII URLs are returned unchanged.

    Should only be called with a unicode string.
    """
    parts = list(urlparse.urlsplit(url))
    try:
        parts[1].encode('ascii')
    except UnicodeEncodeError:
        # Non-ASCII host: IDNA-encode each dot-separated label while
        # leaving any ':port' suffix untouched.
        hostport = parts[1].rsplit(':', 1)
        port = hostport.pop() if len(hostport) == 2 else u''
        labels = [label.encode('idna').decode('utf-8')
                  for label in hostport[0].split('.')]
        parts[1] = '.'.join(labels)
        if port:
            parts[1] += ':' + port
        return urlparse.urlunsplit(parts)
    else:
        # host is already ASCII -- nothing to do
        return url
def _build_urllib2_request(url, agent, etag, modified, referrer, auth, request_headers):
    """Assemble a urllib2.Request carrying conditional-GET (ETag /
    If-Modified-Since), compression, auth, and caller-supplied headers."""
    request = urllib2.Request(url)
    request.add_header('User-Agent', agent)
    if etag:
        request.add_header('If-None-Match', etag)
    if isinstance(modified, basestring):
        modified = _parse_date(modified)
    elif isinstance(modified, datetime.datetime):
        modified = modified.utctimetuple()
    if modified:
        # format into an RFC 1123-compliant timestamp. We can't use
        # time.strftime() since the %a and %b directives can be affected
        # by the current locale, but RFC 2616 states that dates must be
        # in English.
        short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
        months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
        stamp = '%s, %02d %s %04d %02d:%02d:%02d GMT' % (
            short_weekdays[modified[6]], modified[2], months[modified[1] - 1],
            modified[0], modified[3], modified[4], modified[5])
        request.add_header('If-Modified-Since', stamp)
    if referrer:
        request.add_header('Referer', referrer)
    # advertise whichever decompressors are actually importable
    if gzip and zlib:
        accept_encoding = 'gzip, deflate'
    elif gzip:
        accept_encoding = 'gzip'
    elif zlib:
        accept_encoding = 'deflate'
    else:
        accept_encoding = ''
    request.add_header('Accept-encoding', accept_encoding)
    if auth:
        request.add_header('Authorization', 'Basic %s' % auth)
    if ACCEPT_HEADER:
        request.add_header('Accept', ACCEPT_HEADER)
    # use this for whatever -- cookies, special headers, etc
    # [('Cookie','Something'),('x-special-header','Another Value')]
    for header_name, header_value in request_headers.items():
        request.add_header(header_name, header_value)
    request.add_header('A-IM', 'feed') # RFC 3229 support
    return request
def _parse_psc_chapter_start(start):
    """Parse a Podlove Simple Chapters start time ('[HH:]MM:SS[.mmm]')
    into a datetime.timedelta, or return None when it doesn't match."""
    match = re.match(r'^((\d{2}):)?(\d{2}):(\d{2})(\.(\d{3}))?$', start)
    if not match:
        return None
    _, hours, minutes, seconds, _, millis = match.groups()
    total_seconds = (int(hours or 0) * 3600
                     + int(minutes) * 60
                     + int(seconds))
    # timedelta(days, seconds, microseconds)
    return datetime.timedelta(0, total_seconds, int(millis or 0) * 1000)
# Registered date-parsing handlers, most recently registered first.
_date_handlers = []
def registerDateHandler(func):
    '''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
    # prepend so that later registrations take precedence
    _date_handlers[:0] = [func]
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-0MM?-?DD', 'YYYY-MM', 'YYYY-?OOO',
                 'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
                 '-YY-?MM', '-OOO', '-YY',
                 '--MM-?DD', '--MM',
                 '---DD',
                 'CC', '']
# Expand each template into a real regex: the date placeholders become
# named groups, and an optional time + timezone suffix is appended.
_iso8601_re = [
    tmpl.replace(
    'YYYY', r'(?P<year>\d{4})').replace(
    'YY', r'(?P<year>\d\d)').replace(
    'MM', r'(?P<month>[01]\d)').replace(
    'DD', r'(?P<day>[0123]\d)').replace(
    'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
    'CC', r'(?P<century>\d\d$)')
    + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
    + r'(:(?P<second>\d{2}))?'
    + r'(\.(?P<fracsecond>\d+))?'
    + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
    for tmpl in _iso8601_tmpl]
# Delete the comprehension variable, which leaks into module scope under
# Python 2 (the NameError guard keeps this safe where it doesn't leak).
try:
    del tmpl
except NameError:
    pass
# Pre-bind compiled .match methods; _parse_date_iso8601 tries them in order.
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
try:
    del regex
except NameError:
    pass
def _parse_date_iso8601(dateString):
    '''Parse a variety of ISO-8601-compatible formats like 20040105'''
    # Try each prebuilt template regex in order; templates are ordered so
    # the longest (greediest) form is attempted first.
    m = None
    for _iso8601_match in _iso8601_matches:
        m = _iso8601_match(dateString)
        if m:
            break
    if not m:
        return
    # the last template is the empty string, which matches anything with
    # span (0, 0) -- treat that as "no match"
    if m.span() == (0, 0):
        return
    params = m.groupdict()
    ordinal = params.get('ordinal', 0)
    if ordinal:
        ordinal = int(ordinal)
    else:
        ordinal = 0
    # missing year defaults to the current (GMT) year
    year = params.get('year', '--')
    if not year or year == '--':
        year = time.gmtime()[0]
    elif len(year) == 2:
        # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
        year = 100 * int(time.gmtime()[0] / 100) + int(year)
    else:
        year = int(year)
    month = params.get('month', '-')
    if not month or month == '-':
        # ordinals are not normalized by mktime, we simulate them
        # by setting month=1, day=ordinal
        if ordinal:
            month = 1
        else:
            month = time.gmtime()[1]
    month = int(month)
    day = params.get('day', 0)
    if not day:
        # see above
        if ordinal:
            day = ordinal
        elif params.get('century', 0) or \
                 params.get('year', 0) or params.get('month', 0):
            day = 1
        else:
            day = time.gmtime()[2]
    else:
        day = int(day)
    # special case of the century - is the first year of the 21st century
    # 2000 or 2001 ? The debate goes on...
    if 'century' in params:
        year = (int(params['century']) - 1) * 100 + 1
    # in ISO 8601 most fields are optional
    for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
        if not params.get(field, None):
            params[field] = 0
    hour = int(params.get('hour', 0))
    minute = int(params.get('minute', 0))
    second = int(float(params.get('second', 0)))
    # weekday is normalized by mktime(), we can ignore it
    weekday = 0
    daylight_savings_flag = -1
    tm = [year, month, day, hour, minute, second, weekday,
          ordinal, daylight_savings_flag]
    # ISO 8601 time zone adjustments: shift the wall-clock fields by the
    # zone offset and let mktime() below normalize any overflow
    tz = params.get('tz')
    if tz and tz != 'Z':
        if tz[0] == '-':
            tm[3] += int(params.get('tzhour', 0))
            tm[4] += int(params.get('tzmin', 0))
        elif tz[0] == '+':
            tm[3] -= int(params.get('tzhour', 0))
            tm[4] -= int(params.get('tzmin', 0))
        else:
            return None
    # Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
    # which is guaranteed to normalize d/m/y/h/m/s.
    # Many implementations have bugs, but we'll pretend they don't.
    return time.localtime(time.mktime(tuple(tm)))
registerDateHandler(_parse_date_iso8601)
# 8-bit date handling routines written by ytrewq1.
# Hangul tokens used in Korean date strings (comments give the euc-kr bytes).
_korean_year = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day = u'\uc77c' # c0cf in euc-kr
_korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr
# FIX: the patterns are now raw strings; they previously relied on the
# invalid string escapes \d and \s, which Python deprecates (and will
# eventually reject).  The compiled patterns are unchanged.
_korean_onblog_date_re = \
    re.compile(r'(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
        (_korean_year, _korean_month, _korean_day))
_korean_nate_date_re = \
    re.compile(r'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
        (_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
    '''Parse a string according to the OnBlog 8-bit date format'''
    m = _korean_onblog_date_re.match(dateString)
    if not m:
        return
    # groups() is (year, month, day, hour, minute, second); rebuild the
    # value as W3C-DTF in KST (UTC+9) and reuse the W3DTF parser
    w3dtfdate = '%s-%s-%sT%s:%s:%s+09:00' % m.groups()
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
    '''Parse a string according to the Nate 8-bit date format'''
    m = _korean_nate_date_re.match(dateString)
    if not m:
        return
    # group(4) is the Hangul AM/PM token; the PM token adds 12 hours to
    # convert the 12-hour clock to 24-hour
    hour = int(m.group(5))
    if m.group(4) == _korean_pm:
        hour += 12
    # rebuild as W3C-DTF in KST (UTC+9); %02d gives the zero-padding the
    # original applied by hand
    w3dtfdate = '%s-%s-%sT%02d:%s:%s+09:00' % (
        m.group(1), m.group(2), m.group(3), hour, m.group(6), m.group(7))
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)
# Unicode strings for Greek date strings
# (comments give the original iso-8859-7 bytes for each token)
_greek_months = {
    u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7
    u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7
    u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7
    u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7
    u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7
    u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7
    u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7
    u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7
    u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
    u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7
    u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
    u'\u0399\u03bf\u03bb': u'Jul', # c9f9eb in iso-8859-7
    u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7
    u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7
    u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7
    u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7
    u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7
    u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7
    u'\u0394\u03b5\u03ba': u'Dec', # c4e5ea in iso-8859-7
}
_greek_wdays = {
    u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
    u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
    u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
    u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
    u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
    u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
    u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
}
# FIX: raw string -- the pattern previously used the invalid string
# escapes \s and \d in a non-raw literal (deprecated, later an error).
# The compiled pattern is unchanged.
_greek_date_format_re = \
    re.compile(r'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
    '''Parse a string according to a Greek 8-bit date format.'''
    m = _greek_date_format_re.match(dateString)
    if not m:
        return
    # Map the Greek weekday and month tokens to English and hand the
    # rebuilt string to the RFC 822 parser.  An unknown token raises
    # KeyError, which the _parse_date dispatcher treats as "handler
    # failed" -- same as the original behavior.
    rfc822date = '%s, %s %s %s %s:%s:%s %s' % (
        _greek_wdays[m.group(1)], m.group(2), _greek_months[m.group(3)],
        m.group(4), m.group(5), m.group(6), m.group(7), m.group(8))
    return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)
# Unicode strings for Hungarian date strings
# (comments give the original iso-8859-2 bytes for the accented letters)
# NOTE(review): u'febru\u00e1ri' ("februári") and u'm\u00e1ujus' ("máujus")
# look like misspellings of the Hungarian month names "február"/"május";
# they are left untouched here because they are matched against real feed
# data -- confirm against the feeds this targets before changing them.
_hungarian_months = {
    u'janu\u00e1r':   u'01', # e1 in iso-8859-2
    u'febru\u00e1ri': u'02', # e1 in iso-8859-2
    u'm\u00e1rcius':  u'03', # e1 in iso-8859-2
    u'\u00e1prilis':  u'04', # e1 in iso-8859-2
    u'm\u00e1ujus':   u'05', # e1 in iso-8859-2
    u'j\u00fanius':   u'06', # fa in iso-8859-2
    u'j\u00falius':   u'07', # fa in iso-8859-2
    u'augusztus':     u'08',
    u'szeptember':    u'09',
    u'okt\u00f3ber':  u'10', # f3 in iso-8859-2
    u'november':      u'11',
    u'december':      u'12',
}
# FIX: raw string -- the pattern previously used the invalid string
# escapes \d in a non-raw literal (deprecated, later an error).  The
# compiled pattern is unchanged.
_hungarian_date_format_re = \
    re.compile(r'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
def _parse_date_hungarian(dateString):
    '''Parse a string according to a Hungarian 8-bit date format.'''
    m = _hungarian_date_format_re.match(dateString)
    if not m or m.group(2) not in _hungarian_months:
        return None
    # translate the month name, left-pad single-digit day/hour, and
    # delegate to the W3C-DTF parser (the regex captures the zone offset)
    day, hour = m.group(3), m.group(4)
    if len(day) == 1:
        day = '0' + day
    if len(hour) == 1:
        hour = '0' + hour
    w3dtfdate = '%s-%s-%sT%s:%s%s' % (
        m.group(1), _hungarian_months[m.group(2)], day,
        hour, m.group(5), m.group(6))
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
# Lowercase timezone names mapped to their UTC offsets in whole hours,
# used by the W3DTF and RFC 822 parsers below.  Includes the RFC 822
# zone names plus a few single-letter (military-style) designators.
timezonenames = {
    'ut': 0, 'gmt': 0, 'z': 0,
    'adt': -3, 'ast': -4, 'at': -4,
    'edt': -4, 'est': -5, 'et': -5,
    'cdt': -5, 'cst': -6, 'ct': -6,
    'mdt': -6, 'mst': -7, 'mt': -7,
    'pdt': -7, 'pst': -8, 'pt': -8,
    'a': -1, 'n': 1,
    'm': -12, 'y': 12,
}
# W3 date and time format parser
# http://www.w3.org/TR/NOTE-datetime
# Also supports MSSQL-style datetimes as defined at:
# http://msdn.microsoft.com/en-us/library/ms186724.aspx
# (basically, allow a space as a date/time/timezone separator)
def _parse_date_w3dtf(datestr):
    """Parse a W3C-DTF (or MSSQL-style) datetime string into a UTC
    9-tuple, or return None when the string doesn't conform."""
    if not datestr.strip():
        return None
    parts = datestr.lower().split('t')
    if len(parts) == 1:
        # This may be a date only, or may be an MSSQL-style date
        parts = parts[0].split()
        if len(parts) == 1:
            # Treat this as a date only
            parts.append('00:00:00z')
        elif len(parts) > 2:
            return None
    date = parts[0].split('-', 2)
    if not date or len(date[0]) != 4:
        return None
    # Ensure that `date` has 3 elements. Using '1' sets the default
    # month to January and the default day to the 1st of the month.
    date.extend(['1'] * (3 - len(date)))
    try:
        year, month, day = [int(i) for i in date]
    except ValueError:
        # `date` may have more than 3 elements or may contain
        # non-integer strings.
        return None
    if parts[1].endswith('z'):
        parts[1] = parts[1][:-1]
        parts.append('z')
    # Append the numeric timezone offset, if any, to parts.
    # If this is an MSSQL-style date then parts[2] already contains
    # the timezone information, so `append()` will not affect it.
    # Add 1 to each value so that if `find()` returns -1 it will be
    # treated as False.
    loc = parts[1].find('-') + 1 or parts[1].find('+') + 1 or len(parts[1]) + 1
    loc = loc - 1
    parts.append(parts[1][loc:])
    parts[1] = parts[1][:loc]
    # NOTE(review): this local `time` shadows the time module inside the
    # rest of this function (harmless here -- the module isn't used below).
    time = parts[1].split(':', 2)
    # Ensure that time has 3 elements. Using '0' means that the
    # minutes and seconds, if missing, will default to 0.
    time.extend(['0'] * (3 - len(time)))
    tzhour = 0
    tzmin = 0
    if parts[2][:1] in ('-', '+'):
        try:
            tzhour = int(parts[2][1:3])
            tzmin = int(parts[2][4:])
        except ValueError:
            return None
        if parts[2].startswith('-'):
            tzhour = tzhour * -1
            tzmin = tzmin * -1
    else:
        # named zone ('z', 'gmt', 'est', ...); unknown names fall back to 0
        tzhour = timezonenames.get(parts[2], 0)
    try:
        hour, minute, second = [int(float(i)) for i in time]
    except ValueError:
        return None
    # Create the datetime object and timezone delta objects
    try:
        stamp = datetime.datetime(year, month, day, hour, minute, second)
    except ValueError:
        return None
    delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour)
    # Return the date and timestamp in a UTC 9-tuple
    try:
        return (stamp - delta).utctimetuple()
    except (OverflowError, ValueError):
        # IronPython throws ValueErrors instead of OverflowErrors
        return None
registerDateHandler(_parse_date_w3dtf)
def _parse_date_rfc822(date):
    """Parse RFC 822 dates and times, returning a UTC 9-tuple or None.
    http://tools.ietf.org/html/rfc822#section-5
    There are some formatting differences that are accounted for:
    1. Years may be two or four digits.
    2. The month and day can be swapped.
    3. Additional timezone names are supported.
    4. A default time and timezone are assumed if only a date is present.
    """
    daynames = set(['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'])
    months = {
        'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6,
        'jul': 7, 'aug': 8, 'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12,
    }
    parts = date.lower().split()
    if len(parts) < 5:
        # Assume that the time and timezone are missing
        parts.extend(('00:00:00', '0000'))
    # Remove the day name
    if parts[0][:3] in daynames:
        parts = parts[1:]
    if len(parts) < 5:
        # If there are still fewer than five parts, there's not enough
        # information to interpret this
        return None
    try:
        day = int(parts[0])
    except ValueError:
        # Check if the day and month are swapped
        if months.get(parts[0][:3]):
            try:
                day = int(parts[1])
            except ValueError:
                return None
            else:
                # swap succeeded: put the month token where the month
                # lookup below expects it
                parts[1] = parts[0]
        else:
            return None
    month = months.get(parts[1][:3])
    if not month:
        return None
    try:
        year = int(parts[2])
    except ValueError:
        return None
    # Normalize two-digit years:
    # Anything in the 90's is interpreted as 1990 and on
    # Anything 89 or less is interpreted as 2089 or before
    if len(parts[2]) <= 2:
        year += (1900, 2000)[year < 90]
    timeparts = parts[3].split(':')
    # pad missing minutes/seconds with 0
    timeparts = timeparts + ([0] * (3 - len(timeparts)))
    try:
        (hour, minute, second) = map(int, timeparts)
    except ValueError:
        return None
    tzhour = 0
    tzmin = 0
    # Strip 'Etc/' from the timezone
    if parts[4].startswith('etc/'):
        parts[4] = parts[4][4:]
    # Normalize timezones that start with 'gmt':
    # GMT-05:00 => -0500
    # GMT => GMT
    if parts[4].startswith('gmt'):
        parts[4] = ''.join(parts[4][3:].split(':')) or 'gmt'
    # Handle timezones like '-0500', '+0500', and 'EST'
    if parts[4] and parts[4][0] in ('-', '+'):
        try:
            tzhour = int(parts[4][1:3])
            tzmin = int(parts[4][3:])
        except ValueError:
            return None
        if parts[4].startswith('-'):
            tzhour = tzhour * -1
            tzmin = tzmin * -1
    else:
        tzhour = timezonenames.get(parts[4], 0)
    # Create the datetime object and timezone delta objects
    try:
        stamp = datetime.datetime(year, month, day, hour, minute, second)
    except ValueError:
        return None
    delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour)
    # Return the date and timestamp in a UTC 9-tuple
    try:
        return (stamp - delta).utctimetuple()
    except (OverflowError, ValueError):
        # IronPython throws ValueErrors instead of OverflowErrors
        return None
registerDateHandler(_parse_date_rfc822)
# Month abbreviations for asctime-style dates; index + 1 == month number.
_months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun',
           'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
def _parse_date_asctime(dt):
    """Parse asctime-style dates"""
    # Split off the weekday name, then rewrite the month and day as
    # zero-padded numbers so a single strptime format handles the rest.
    dayname, month, day, remainder = dt.split(None, 3)
    normalized = '%02i %02i %s' % (_months.index(month.lower()) + 1, int(day), remainder)
    # struct_time minus the DST flag, with 0 appended (9-tuple in GMT)
    return time.strptime(normalized, '%m %d %H:%M:%S %Y')[:-1] + (0, )
# register (handlers registered later are tried first by _parse_date)
registerDateHandler(_parse_date_asctime)
def _parse_date_perforce(aDateString):
    """parse a date in yyyy/mm/dd hh:mm:ss TTT format"""
    # Fri, 2006/09/15 08:19:53 EDT
    pattern = re.compile(
        r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})')
    m = pattern.search(aDateString)
    if m is None:
        return None
    dow, year, month, day, hour, minute, second, tz = m.groups()
    # rewrite as an RFC 822 date and let the stdlib rfc822 module do
    # the timezone math
    months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    dateString = "%s, %s %s %s %s:%s:%s %s" % (
        dow, day, months[int(month) - 1], year, hour, minute, second, tz)
    tm = rfc822.parsedate_tz(dateString)
    if not tm:
        return None
    return time.gmtime(rfc822.mktime_tz(tm))
registerDateHandler(_parse_date_perforce)
def _parse_date(dateString):
    '''Parses a variety of date formats into a 9-tuple in GMT'''
    if not dateString:
        return None
    # Try each registered handler in turn.  A handler "declines" by
    # returning a falsy value, returning something that is not a
    # 9-tuple, or raising one of the expected parse-failure exceptions.
    for handler in _date_handlers:
        try:
            result = handler(dateString)
        except (KeyError, OverflowError, ValueError):
            continue
        if result and len(result) == 9:
            return result
    return None
# Each marker represents some of the characters of the opening XML
# processing instruction ('<?xm') in the specified encoding.
EBCDIC_MARKER = _l2bytes([0x4C, 0x6F, 0xA7, 0x94])
UTF16BE_MARKER = _l2bytes([0x00, 0x3C, 0x00, 0x3F])
UTF16LE_MARKER = _l2bytes([0x3C, 0x00, 0x3F, 0x00])
UTF32BE_MARKER = _l2bytes([0x00, 0x00, 0x00, 0x3C])
UTF32LE_MARKER = _l2bytes([0x3C, 0x00, 0x00, 0x00])
ZERO_BYTES = _l2bytes([0x00, 0x00])
# Match the opening XML declaration.
# Example: <?xml version="1.0" encoding="utf-8"?>
# FIX: raw strings -- both patterns previously used the invalid string
# escape \? in non-raw literals (deprecated, later an error).  The
# compiled patterns are unchanged.
RE_XML_DECLARATION = re.compile(r'^<\?xml[^>]*?>')
# Capture the value of the XML processing instruction's encoding attribute.
# Example: <?xml version="1.0" encoding="utf-8"?>
RE_XML_PI_ENCODING = re.compile(_s2bytes(r'^<\?.*encoding=[\'"](.*?)[\'"].*\?>'))
def convert_to_utf8(http_headers, data):
    '''Detect and convert the character encoding to UTF-8.

    http_headers is a dictionary (keys are looked up in lowercase)
    data is a raw string (not Unicode)

    Returns a tuple (data, rfc3023_encoding, error):
    - data: the document, re-encoded as UTF-8 when a working source
      encoding was found (otherwise returned unchanged)
    - rfc3023_encoding: the encoding chosen per RFC 3023, or u'' when no
      candidate encoding could decode the document
    - error: None, or an exception instance explaining why the result
      should be flagged as not well-formed (bozo)
    '''
    # This is so much trickier than it sounds, it's not even funny.
    # According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
    # is application/xml, application/*+xml,
    # application/xml-external-parsed-entity, or application/xml-dtd,
    # the encoding given in the charset parameter of the HTTP Content-Type
    # takes precedence over the encoding given in the XML prefix within the
    # document, and defaults to 'utf-8' if neither are specified. But, if
    # the HTTP Content-Type is text/xml, text/*+xml, or
    # text/xml-external-parsed-entity, the encoding given in the XML prefix
    # within the document is ALWAYS IGNORED and only the encoding given in
    # the charset parameter of the HTTP Content-Type header should be
    # respected, and it defaults to 'us-ascii' if not specified.
    # Furthermore, discussion on the atom-syntax mailing list with the
    # author of RFC 3023 leads me to the conclusion that any document
    # served with a Content-Type of text/* and no charset parameter
    # must be treated as us-ascii. (We now do this.) And also that it
    # must always be flagged as non-well-formed. (We now do this too.)
    # If Content-Type is unspecified (input was local file or non-HTTP source)
    # or unrecognized (server just got it totally wrong), then go by the
    # encoding given in the XML prefix of the document and default to
    # 'iso-8859-1' as per the HTTP specification (RFC 2616).
    # Then, assuming we didn't find a character encoding in the HTTP headers
    # (and the HTTP Content-type allowed us to look in the body), we need
    # to sniff the first few bytes of the XML data and try to determine
    # whether the encoding is ASCII-compatible. Section F of the XML
    # specification shows the way here:
    # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
    # If the sniffed encoding is not ASCII-compatible, we need to make it
    # ASCII compatible so that we can sniff further into the XML declaration
    # to find the encoding attribute, which will tell us the true encoding.
    # Of course, none of this guarantees that we will be able to parse the
    # feed in the declared character encoding (assuming it was declared
    # correctly, which many are not). iconv_codec can help a lot;
    # you should definitely install it if you can.
    # http://cjkpython.i18n.org/
    bom_encoding = u''
    xml_encoding = u''
    rfc3023_encoding = u''
    # Look at the first few bytes of the document to guess what
    # its encoding may be. We only need to decode enough of the
    # document that we can use an ASCII-compatible regular
    # expression to search for an XML encoding declaration.
    # The heuristic follows the XML specification, section F:
    # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
    # Check for BOMs first.
    if data[:4] == codecs.BOM_UTF32_BE:
        bom_encoding = u'utf-32be'
        data = data[4:]
    elif data[:4] == codecs.BOM_UTF32_LE:
        bom_encoding = u'utf-32le'
        data = data[4:]
    elif data[:2] == codecs.BOM_UTF16_BE and data[2:4] != ZERO_BYTES:
        bom_encoding = u'utf-16be'
        data = data[2:]
    elif data[:2] == codecs.BOM_UTF16_LE and data[2:4] != ZERO_BYTES:
        bom_encoding = u'utf-16le'
        data = data[2:]
    elif data[:3] == codecs.BOM_UTF8:
        bom_encoding = u'utf-8'
        data = data[3:]
    # Check for the characters '<?xm' in several encodings.
    elif data[:4] == EBCDIC_MARKER:
        bom_encoding = u'cp037'
    elif data[:4] == UTF16BE_MARKER:
        bom_encoding = u'utf-16be'
    elif data[:4] == UTF16LE_MARKER:
        bom_encoding = u'utf-16le'
    elif data[:4] == UTF32BE_MARKER:
        bom_encoding = u'utf-32be'
    elif data[:4] == UTF32LE_MARKER:
        bom_encoding = u'utf-32le'
    tempdata = data
    try:
        if bom_encoding:
            tempdata = data.decode(bom_encoding).encode('utf-8')
    except (UnicodeDecodeError, LookupError):
        # feedparser recognizes UTF-32 encodings that aren't
        # available in Python 2.4 and 2.5, so it's possible to
        # encounter a LookupError during decoding.
        xml_encoding_match = None
    else:
        xml_encoding_match = RE_XML_PI_ENCODING.match(tempdata)
    if xml_encoding_match:
        xml_encoding = xml_encoding_match.groups()[0].decode('utf-8').lower()
        # Normalize the xml_encoding if necessary.
        if bom_encoding and (xml_encoding in (
            u'u16', u'utf-16', u'utf16', u'utf_16',
            u'u32', u'utf-32', u'utf32', u'utf_32',
            u'iso-10646-ucs-2', u'iso-10646-ucs-4',
            u'csucs4', u'csunicode', u'ucs-2', u'ucs-4'
        )):
            xml_encoding = bom_encoding
    # Find the HTTP Content-Type and, hopefully, a character
    # encoding provided by the server. The Content-Type is used
    # to choose the "correct" encoding among the BOM encoding,
    # XML declaration encoding, and HTTP encoding, following the
    # heuristic defined in RFC 3023.
    http_content_type = http_headers.get('content-type') or ''
    http_content_type, params = cgi.parse_header(http_content_type)
    http_encoding = params.get('charset', '').replace("'", "")
    if not isinstance(http_encoding, unicode):
        http_encoding = http_encoding.decode('utf-8', 'ignore')
    acceptable_content_type = 0
    application_content_types = (u'application/xml', u'application/xml-dtd',
                                 u'application/xml-external-parsed-entity')
    text_content_types = (u'text/xml', u'text/xml-external-parsed-entity')
    if (http_content_type in application_content_types) or \
        (http_content_type.startswith(u'application/') and
         http_content_type.endswith(u'+xml')):
        acceptable_content_type = 1
        rfc3023_encoding = http_encoding or xml_encoding or u'utf-8'
    elif (http_content_type in text_content_types) or \
        (http_content_type.startswith(u'text/') and
         http_content_type.endswith(u'+xml')):
        acceptable_content_type = 1
        rfc3023_encoding = http_encoding or u'us-ascii'
    elif http_content_type.startswith(u'text/'):
        rfc3023_encoding = http_encoding or u'us-ascii'
    elif http_headers and 'content-type' not in http_headers:
        rfc3023_encoding = xml_encoding or u'iso-8859-1'
    else:
        rfc3023_encoding = xml_encoding or u'utf-8'
    # gb18030 is a superset of gb2312, so always replace gb2312
    # with gb18030 for greater compatibility.
    if rfc3023_encoding.lower() == u'gb2312':
        rfc3023_encoding = u'gb18030'
    if xml_encoding.lower() == u'gb2312':
        xml_encoding = u'gb18030'
    # there are four encodings to keep track of:
    # - http_encoding is the encoding declared in the Content-Type HTTP header
    # - xml_encoding is the encoding declared in the <?xml declaration
    # - bom_encoding is the encoding sniffed from the first 4 bytes of the XML data
    # - rfc3023_encoding is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
    error = None
    if http_headers and (not acceptable_content_type):
        if 'content-type' in http_headers:
            msg = '%s is not an XML media type' % http_headers['content-type']
        else:
            msg = 'no Content-type specified'
        error = NonXMLContentType(msg)
    # determine character encoding
    known_encoding = 0
    chardet_encoding = None
    tried_encodings = []
    if chardet:
        chardet_encoding = chardet.detect(data)['encoding']
        if not chardet_encoding:
            chardet_encoding = ''
        if not isinstance(chardet_encoding, unicode):
            chardet_encoding = unicode(chardet_encoding, 'ascii', 'ignore')
    # try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
    for proposed_encoding in (rfc3023_encoding, xml_encoding, bom_encoding,
        chardet_encoding, u'utf-8', u'windows-1252', u'iso-8859-2'):
        if not proposed_encoding:
            continue
        if proposed_encoding in tried_encodings:
            continue
        tried_encodings.append(proposed_encoding)
        try:
            data = data.decode(proposed_encoding)
        except (UnicodeDecodeError, LookupError):
            pass
        else:
            known_encoding = 1
            # Update the encoding in the opening XML processing instruction.
            new_declaration = '''<?xml version='1.0' encoding='utf-8'?>'''
            if RE_XML_DECLARATION.search(data):
                data = RE_XML_DECLARATION.sub(new_declaration, data)
            else:
                data = new_declaration + u'\n' + data
            data = data.encode('utf-8')
            break
    # if still no luck, give up
    if not known_encoding:
        error = CharacterEncodingUnknown(
            'document encoding unknown, I tried ' +
            '%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' %
            (rfc3023_encoding, xml_encoding))
        rfc3023_encoding = u''
    elif proposed_encoding != rfc3023_encoding:
        error = CharacterEncodingOverride(
            'document declared as %s, but parsed as %s' %
            (rfc3023_encoding, proposed_encoding))
        rfc3023_encoding = proposed_encoding
    return data, rfc3023_encoding, error
# Match XML entity declarations.
# Example: <!ENTITY copyright "(C)">
RE_ENTITY_PATTERN = re.compile(_s2bytes(r'^\s*<!ENTITY([^>]*?)>'), re.MULTILINE)
# Match XML DOCTYPE declarations.
# Example: <!DOCTYPE feed [ ]>
RE_DOCTYPE_PATTERN = re.compile(_s2bytes(r'^\s*<!DOCTYPE([^>]*?)>'), re.MULTILINE)
# Match safe entity declarations.
# This will allow hexadecimal character references through,
# as well as text, but not arbitrary nested entities.
# Example: cubed "&#179;"
# Example: copyright "(C)"
# Forbidden: explode1 "&explode2;&explode2;"
# NOTE: raw string so '\s' and '\w' are regex escapes, not invalid string
# escapes (a DeprecationWarning/SyntaxWarning on modern Pythons), matching
# the two patterns above.
RE_SAFE_ENTITY_PATTERN = re.compile(_s2bytes(r'\s+(\w+)\s+"(&#\w+;|[^&"]*)"'))
def replace_doctype(data):
    '''Strips and replaces the DOCTYPE, returns (rss_version, stripped_data)

    rss_version may be 'rss091n' or None
    stripped_data is the same XML document with a replaced DOCTYPE
    '''
    # Divide the document into two groups by finding the location
    # of the first element that doesn't begin with '<?' or '<!'.
    start = re.search(_s2bytes(r'<\w'), data)
    start = start and start.start() or -1
    head, data = data[:start+1], data[start+1:]
    # Save and then remove all of the ENTITY declarations.
    entity_results = RE_ENTITY_PATTERN.findall(head)
    head = RE_ENTITY_PATTERN.sub(_s2bytes(''), head)
    # Find the DOCTYPE declaration and check the feed type.
    doctype_results = RE_DOCTYPE_PATTERN.findall(head)
    doctype = doctype_results and doctype_results[0] or _s2bytes('')
    if _s2bytes('netscape') in doctype.lower():
        version = u'rss091n'
    else:
        version = None
    # Re-insert the safe ENTITY declarations if a DOCTYPE was found.
    replacement = _s2bytes('')
    if len(doctype_results) == 1 and entity_results:
        # A list comprehension (not filter()) so the truthiness test below
        # also works on Python 3, where filter() returns a lazy iterator
        # that is always truthy even when it yields nothing.
        safe_entities = [e for e in entity_results
                         if RE_SAFE_ENTITY_PATTERN.match(e)]
        if safe_entities:
            replacement = _s2bytes('<!DOCTYPE feed [\n<!ENTITY') \
                        + _s2bytes('>\n<!ENTITY ').join(safe_entities) \
                        + _s2bytes('>\n]>')
    data = RE_DOCTYPE_PATTERN.sub(replacement, head) + data
    # Precompute the safe entities for the loose parser.
    safe_entities = dict((k.decode('utf-8'), v.decode('utf-8'))
                         for k, v in RE_SAFE_ENTITY_PATTERN.findall(replacement))
    return version, data, safe_entities
# GeoRSS geometry parsers. Each return a dict with 'type' and 'coordinates'
# items, or None in the case of a parsing error.
def _parse_poslist(value, geom_type, swap=True, dims=2):
    # Dispatch a GML posList on the declared geometry type; anything other
    # than a linestring or polygon is unsupported and yields None.
    if geom_type == 'polygon':
        ring = _parse_georss_line(value, swap, dims)
        return {'type': u'Polygon', 'coordinates': (ring['coordinates'],)}
    if geom_type == 'linestring':
        return _parse_georss_line(value, swap, dims)
    return None
def _gen_georss_coords(value, swap=True, dims=2):
    # A generator of (lon, lat) pairs from a string of encoded GeoRSS
    # coordinates. Converts to floats and swaps order.
    #
    # StopIteration is caught explicitly so the generator also works under
    # PEP 479 (Python 3.7+), where a StopIteration escaping a generator
    # body is converted into a RuntimeError; the original code let the
    # exception leak, which only worked on Python 2.
    latlons = iter([float(ll) for ll in
                    value.strip().replace(',', ' ').split()])
    while True:
        try:
            t = [next(latlons), next(latlons)]
            if swap:
                t.reverse()
            if dims == 3:
                t.append(next(latlons))
        except StopIteration:
            # Ran out of values mid-tuple or cleanly: stop either way,
            # matching the original behavior of dropping a partial tuple.
            return
        yield tuple(t)
def _parse_georss_point(value, swap=True, dims=2):
    # A point contains a single latitude-longitude pair, separated by
    # whitespace. We'll also handle comma separators.
    try:
        first_pair = list(_gen_georss_coords(value, swap, dims))[0]
    except (IndexError, ValueError):
        return None
    return {u'type': u'Point', u'coordinates': first_pair}
def _parse_georss_line(value, swap=True, dims=2):
    # A line contains a space separated list of latitude-longitude pairs
    # in the WGS84 coordinate reference system; there must be at least
    # two pairs.
    try:
        points = list(_gen_georss_coords(value, swap, dims))
    except (IndexError, ValueError):
        return None
    return {u'type': u'LineString', u'coordinates': points}
def _parse_georss_polygon(value, swap=True, dims=2):
    # A polygon contains a space separated list of latitude-longitude
    # pairs. There must be at least four pairs, with the last identical
    # to the first (so a polygon has a minimum of three actual points).
    try:
        ring = list(_gen_georss_coords(value, swap, dims))
    except (IndexError, ValueError):
        return None
    if len(ring) >= 4:
        return {u'type': u'Polygon', u'coordinates': (ring,)}
    return None
def _parse_georss_box(value, swap=True, dims=2):
    # A bounding box is a rectangular region given as two whitespace
    # separated latitude-longitude pairs: the lower corner followed by
    # the upper corner.
    try:
        corners = tuple(_gen_georss_coords(value, swap, dims))
    except (IndexError, ValueError):
        return None
    return {u'type': u'Box', u'coordinates': corners}
# end geospatial parsers
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=None, request_headers=None, response_headers=None):
    '''Parse a feed from a URL, file, stream, or string.

    request_headers, if given, is a dict from http header name to value to add
    to the request; this overrides internally generated values.

    etag and modified are forwarded to the HTTP layer so the server can
    answer 304 Not Modified; handlers is a urllib2-style handler or list of
    handlers; response_headers, if given, overrides the headers reported by
    the actual response.

    Returns a FeedParserDict containing (at least) 'feed', 'entries' and
    'bozo'. 'bozo' is 1 when fetching or parsing failed, in which case
    'bozo_exception' holds the underlying error.
    '''
    if handlers is None:
        handlers = []
    if request_headers is None:
        request_headers = {}
    if response_headers is None:
        response_headers = {}
    result = FeedParserDict()
    result['feed'] = FeedParserDict()
    result['entries'] = []
    result['bozo'] = 0
    # Accept a single handler object as a convenience.
    if not isinstance(handlers, list):
        handlers = [handlers]
    try:
        f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers)
        data = f.read()
    except Exception, e:
        # Fetch failures are reported through the bozo fields, not raised.
        result['bozo'] = 1
        result['bozo_exception'] = e
        data = None
        f = None
    if hasattr(f, 'headers'):
        result['headers'] = dict(f.headers)
    # overwrite existing headers using response_headers
    if 'headers' in result:
        result['headers'].update(response_headers)
    elif response_headers:
        result['headers'] = copy.deepcopy(response_headers)
    # lowercase all of the HTTP headers for comparisons per RFC 2616
    if 'headers' in result:
        http_headers = dict((k.lower(), v) for k, v in result['headers'].items())
    else:
        http_headers = {}
    # if feed is gzip-compressed, decompress it
    if f and data and http_headers:
        if gzip and 'gzip' in http_headers.get('content-encoding', ''):
            try:
                data = gzip.GzipFile(fileobj=_StringIO(data)).read()
            except (IOError, struct.error), e:
                # IOError can occur if the gzip header is bad.
                # struct.error can occur if the data is damaged.
                result['bozo'] = 1
                result['bozo_exception'] = e
                if isinstance(e, struct.error):
                    # A gzip header was found but the data is corrupt.
                    # Ideally, we should re-request the feed without the
                    # 'Accept-encoding: gzip' header, but we don't.
                    data = None
        elif zlib and 'deflate' in http_headers.get('content-encoding', ''):
            try:
                data = zlib.decompress(data)
            except zlib.error, e:
                try:
                    # The data may have no headers and no checksum.
                    data = zlib.decompress(data, -15)
                except zlib.error, e:
                    result['bozo'] = 1
                    result['bozo_exception'] = e
    # save HTTP headers
    if http_headers:
        if 'etag' in http_headers:
            etag = http_headers.get('etag', u'')
            if not isinstance(etag, unicode):
                etag = etag.decode('utf-8', 'ignore')
            if etag:
                result['etag'] = etag
        if 'last-modified' in http_headers:
            modified = http_headers.get('last-modified', u'')
            if modified:
                result['modified'] = modified
                result['modified_parsed'] = _parse_date(modified)
    if hasattr(f, 'url'):
        if not isinstance(f.url, unicode):
            result['href'] = f.url.decode('utf-8', 'ignore')
        else:
            result['href'] = f.url
        result['status'] = 200
    if hasattr(f, 'status'):
        result['status'] = f.status
    if hasattr(f, 'close'):
        f.close()
    if data is None:
        return result
    # Stop processing if the server sent HTTP 304 Not Modified.
    if getattr(f, 'code', 0) == 304:
        result['version'] = u''
        result['debug_message'] = 'The feed has not changed since you last checked, ' + \
        'so the server sent no data. This is a feature, not a bug!'
        return result
    data, result['encoding'], error = convert_to_utf8(http_headers, data)
    # Only attempt the strict (SAX) parser when a working encoding was found.
    use_strict_parser = result['encoding'] and True or False
    if error is not None:
        result['bozo'] = 1
        result['bozo_exception'] = error
    result['version'], data, entities = replace_doctype(data)
    # Ensure that baseuri is an absolute URI using an acceptable URI scheme.
    contentloc = http_headers.get('content-location', u'')
    href = result.get('href', u'')
    baseuri = _makeSafeAbsoluteURI(href, contentloc) or _makeSafeAbsoluteURI(contentloc) or href
    baselang = http_headers.get('content-language', None)
    if not isinstance(baselang, unicode) and baselang is not None:
        baselang = baselang.decode('utf-8', 'ignore')
    if not _XML_AVAILABLE:
        use_strict_parser = 0
    if use_strict_parser:
        # initialize the SAX parser
        feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
        saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
        saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
        try:
            # disable downloading external doctype references, if possible
            saxparser.setFeature(xml.sax.handler.feature_external_ges, 0)
        except xml.sax.SAXNotSupportedException:
            pass
        saxparser.setContentHandler(feedparser)
        saxparser.setErrorHandler(feedparser)
        source = xml.sax.xmlreader.InputSource()
        source.setByteStream(_StringIO(data))
        try:
            saxparser.parse(source)
        except xml.sax.SAXException, e:
            # Fall back to the loose parser on any XML well-formedness error.
            result['bozo'] = 1
            result['bozo_exception'] = feedparser.exc or e
            use_strict_parser = 0
    if not use_strict_parser and _SGML_AVAILABLE:
        feedparser = _LooseFeedParser(baseuri, baselang, 'utf-8', entities)
        feedparser.feed(data.decode('utf-8', 'replace'))
    result['feed'] = feedparser.feeddata
    result['entries'] = feedparser.entries
    result['version'] = result['version'] or feedparser.version
    result['namespaces'] = feedparser.namespacesInUse
    return result
# The list of EPSG codes for geographic (latitude/longitude) coordinate
# systems to support decoding of GeoRSS GML profiles.
_geogCS = [
3819, 3821, 3824, 3889, 3906, 4001, 4002, 4003, 4004, 4005, 4006, 4007, 4008,
4009, 4010, 4011, 4012, 4013, 4014, 4015, 4016, 4018, 4019, 4020, 4021, 4022,
4023, 4024, 4025, 4027, 4028, 4029, 4030, 4031, 4032, 4033, 4034, 4035, 4036,
4041, 4042, 4043, 4044, 4045, 4046, 4047, 4052, 4053, 4054, 4055, 4075, 4081,
4120, 4121, 4122, 4123, 4124, 4125, 4126, 4127, 4128, 4129, 4130, 4131, 4132,
4133, 4134, 4135, 4136, 4137, 4138, 4139, 4140, 4141, 4142, 4143, 4144, 4145,
4146, 4147, 4148, 4149, 4150, 4151, 4152, 4153, 4154, 4155, 4156, 4157, 4158,
4159, 4160, 4161, 4162, 4163, 4164, 4165, 4166, 4167, 4168, 4169, 4170, 4171,
4172, 4173, 4174, 4175, 4176, 4178, 4179, 4180, 4181, 4182, 4183, 4184, 4185,
4188, 4189, 4190, 4191, 4192, 4193, 4194, 4195, 4196, 4197, 4198, 4199, 4200,
4201, 4202, 4203, 4204, 4205, 4206, 4207, 4208, 4209, 4210, 4211, 4212, 4213,
4214, 4215, 4216, 4218, 4219, 4220, 4221, 4222, 4223, 4224, 4225, 4226, 4227,
4228, 4229, 4230, 4231, 4232, 4233, 4234, 4235, 4236, 4237, 4238, 4239, 4240,
4241, 4242, 4243, 4244, 4245, 4246, 4247, 4248, 4249, 4250, 4251, 4252, 4253,
4254, 4255, 4256, 4257, 4258, 4259, 4260, 4261, 4262, 4263, 4264, 4265, 4266,
4267, 4268, 4269, 4270, 4271, 4272, 4273, 4274, 4275, 4276, 4277, 4278, 4279,
4280, 4281, 4282, 4283, 4284, 4285, 4286, 4287, 4288, 4289, 4291, 4292, 4293,
4294, 4295, 4296, 4297, 4298, 4299, 4300, 4301, 4302, 4303, 4304, 4306, 4307,
4308, 4309, 4310, 4311, 4312, 4313, 4314, 4315, 4316, 4317, 4318, 4319, 4322,
4324, 4326, 4463, 4470, 4475, 4483, 4490, 4555, 4558, 4600, 4601, 4602, 4603,
4604, 4605, 4606, 4607, 4608, 4609, 4610, 4611, 4612, 4613, 4614, 4615, 4616,
4617, 4618, 4619, 4620, 4621, 4622, 4623, 4624, 4625, 4626, 4627, 4628, 4629,
4630, 4631, 4632, 4633, 4634, 4635, 4636, 4637, 4638, 4639, 4640, 4641, 4642,
4643, 4644, 4645, 4646, 4657, 4658, 4659, 4660, 4661, 4662, 4663, 4664, 4665,
4666, 4667, 4668, 4669, 4670, 4671, 4672, 4673, 4674, 4675, 4676, 4677, 4678,
4679, 4680, 4681, 4682, 4683, 4684, 4685, 4686, 4687, 4688, 4689, 4690, 4691,
4692, 4693, 4694, 4695, 4696, 4697, 4698, 4699, 4700, 4701, 4702, 4703, 4704,
4705, 4706, 4707, 4708, 4709, 4710, 4711, 4712, 4713, 4714, 4715, 4716, 4717,
4718, 4719, 4720, 4721, 4722, 4723, 4724, 4725, 4726, 4727, 4728, 4729, 4730,
4731, 4732, 4733, 4734, 4735, 4736, 4737, 4738, 4739, 4740, 4741, 4742, 4743,
4744, 4745, 4746, 4747, 4748, 4749, 4750, 4751, 4752, 4753, 4754, 4755, 4756,
4757, 4758, 4759, 4760, 4761, 4762, 4763, 4764, 4765, 4801, 4802, 4803, 4804,
4805, 4806, 4807, 4808, 4809, 4810, 4811, 4813, 4814, 4815, 4816, 4817, 4818,
4819, 4820, 4821, 4823, 4824, 4901, 4902, 4903, 4904, 4979 ]
# -*- coding: utf-8 -*-
from __future__ import division, absolute_import
import numpy as np
from .constants import deg2rad, earth_radius
from .library import T90conv, T68conv, salrt, salrp, sals, seck, smow
# Public EOS-80 API exported by ``from seawater.eos80 import *``.
__all__ = ['adtg',
           'alpha',
           'aonb',
           'beta',
           'dpth',
           'g',
           'salt',
           'fp',
           'svel',
           'pres',
           'dens0',
           'dens',
           'pden',
           'cp',
           'ptmp',
           'temp']
def adtg(s, t, p):
    """
    Adiabatic temperature gradient as per UNESCO 1983 routines.

    Parameters
    ----------
    s(p) : array_like
        salinity [psu (PSS-78)]
    t(p) : array_like
        temperature [℃ (ITS-90)]
    p : array_like
        pressure [db]

    Returns
    -------
    adtg : array_like
        adiabatic temperature gradient [℃ db :sup:`-1`]

    References
    ----------
    .. [1] Fofonoff, P. and Millard, R.C. Jr UNESCO 1983. Algorithms for
       computation of fundamental properties of seawater. UNESCO Tech. Pap.
       in Mar. Sci., No. 44, 53 pp.
       http://unesdoc.unesco.org/images/0005/000598/059832eb.pdf
    .. [2] Bryden, H. 1973. New Polynomials for thermal expansion, adiabatic
       temperature gradient and potential temperature of sea water. Deep-Sea
       Res. Vol20,401-408. doi:10.1016/0011-7471(73)90063-6
    """
    s, t, p = list(map(np.asanyarray, (s, t, p)))
    T68 = T68conv(t)
    # UNESCO 1983 coefficients, listed in ascending powers of T68.
    a = (3.5803e-5, 8.5258e-6, -6.836e-8, 6.6228e-10)
    b = (1.8932e-6, -4.2393e-8)
    c = (1.8741e-8, -6.7795e-10, 8.733e-12, -5.4481e-14)
    d = (-1.1351e-10, 2.7759e-12)
    e = (-4.6206e-13, 1.8676e-14, -2.1687e-16)
    ds = s - 35
    # np.polyval expects descending coefficients, hence the [::-1]; the
    # Horner evaluation is numerically identical to the nested products.
    term_t = np.polyval(a[::-1], T68)
    term_st = (b[0] + b[1] * T68) * ds
    term_p = (np.polyval(c[::-1], T68) + (d[0] + d[1] * T68) * ds) * p
    term_p2 = np.polyval(e[::-1], T68) * p * p
    return term_t + term_st + term_p + term_p2
def alpha(s, t, p, pt=False):
    """
    Thermal expansion coefficient :math:`\\alpha` of seawater.

    Parameters
    ----------
    s(p) : array_like
        salinity [psu (PSS-78)]
    t(p) : array_like
        temperature or potential temperature [℃ (ITS-90)]
    p : array_like
        pressure [db]
    pt : bool
        True if t is potential temperature, default is False

    Returns
    -------
    alpha : array_like
        thermal expansion coeff :math:`\\alpha` [℃ :sup:`-1`]

    References
    ----------
    .. [1] McDougall, Trevor J., 1987: Neutral Surfaces. J. Phys. Oceanogr.,
       17, 1950-1964. doi: 10.1175/1520-0485(1987)017<1950:NS>2.0.CO;2
    """
    s, t, p, pt = list(map(np.asanyarray, (s, t, p, pt)))
    # alpha = (alpha / beta) * beta, both from McDougall's polynomial fits.
    ratio = aonb(s, t, p, pt)
    contraction = beta(s, t, p, pt)
    return ratio * contraction
def aonb(s, t, p, pt=False):
    """
    Calculate :math:`\\alpha/\\beta`.

    Parameters
    ----------
    s(p) : array_like
        salinity [psu (PSS-78)]
    t(p) : array_like
        temperature or potential temperature [℃ (ITS-90)]
    p : array_like
        pressure [db].
    pt : bool
        True if temperature is potential, default is False

    Returns
    -------
    aonb : array_like
        :math:`\\alpha/\\beta` [psu ℃ :sup:`-1`]

    Examples
    --------
    >>> # Data from McDougall 1987.
    >>> import seawater as sw
    >>> s, t, p = 40, 10, 4000
    >>> sw.aonb(s, t, p, pt=True)
    0.347650567047807

    References
    ----------
    .. [1] McDougall, Trevor J., 1987: Neutral Surfaces. J. Phys. Oceanogr.,
       17, 1950-1964. doi: 10.1175/1520-0485(1987)017<1950:NS>2.0.CO;2
    """
    # Ensure we use ptmp in calculations.
    s, t, p, pt = list(map(np.asanyarray, (s, t, p, pt)))
    if not pt:
        t = ptmp(s, t, p, 0)  # Now we have ptmp.
    # np.float_ was removed in NumPy 2.0; np.float64 is the same type it
    # aliased, so the conversion behavior is unchanged.
    p = np.float64(p)
    t = T68conv(t)
    c1 = np.array([-0.255019e-7, 0.298357e-5, -0.203814e-3,
                   0.170907e-1, 0.665157e-1])
    c2 = np.array([-0.846960e-4, 0.378110e-2])
    c2a = np.array([-0.251520e-11, -0.164759e-6, 0.0])
    c3 = -0.678662e-5
    c4 = np.array([0.791325e-8, -0.933746e-6, 0.380374e-4])
    c5 = 0.512857e-12
    c6 = -0.302285e-13
    # Now calculate the thermal expansion saline contraction ratio aonb.
    sm35 = s - 35.0
    return (np.polyval(c1, t) + sm35 *
            (np.polyval(c2, t) + np.polyval(c2a, p)) +
            sm35 ** 2 * c3 + p * np.polyval(c4, t) +
            c5 * (p ** 2) * (t ** 2) + c6 * p ** 3)
def beta(s, t, p, pt=False):
    """
    Saline contraction coefficient :math:`\\beta` as defined by
    T.J. McDougall.

    Parameters
    ----------
    s(p) : array_like
        salinity [psu (PSS-78)]
    t(p) : array_like
        temperature or potential temperature [℃ (ITS-90)]
    p : array_like
        pressure [db]
    pt : bool
        True if t is potential temperature, default is False

    Returns
    -------
    beta : array_like
        saline Contraction Coefficient [psu :sup:`-1`]

    References
    ----------
    .. [1] McDougall, Trevor J., 1987: Neutral Surfaces. J. Phys. Oceanogr.,
       17, 1950-1964. doi: 10.1175/1520-0485(1987)017<1950:NS>2.0.CO;2
    """
    s, t, p, pt = list(map(np.asanyarray, (s, t, p, pt)))
    # The fit is in terms of potential temperature; convert if needed.
    if not pt:
        t = ptmp(s, t, p, 0)
    t = T68conv(t)
    # Coefficient sets from McDougall (1987), descending powers for polyval.
    coef_t = np.array([-0.415613e-9, 0.555579e-7, -0.301985e-5, 0.785567e-3])
    coef_st = np.array([0.788212e-8, -0.356603e-6])
    coef_sp = np.array([-0.602281e-15, 0.408195e-10, 0.0])
    coef_s2 = 0.515032e-8
    coef_pt = np.array([-0.213127e-11, 0.192867e-9, -0.121555e-7])
    coef_p2t = np.array([-0.175379e-14, 0.176621e-12])
    coef_p3 = 0.121551e-17
    ds = s - 35
    out = np.polyval(coef_t, t)
    out = out + ds * (np.polyval(coef_st, t) + np.polyval(coef_sp, p))
    out = out + coef_s2 * (ds ** 2)
    out = out + p * np.polyval(coef_pt, t)
    out = out + (p ** 2) * np.polyval(coef_p2t, t)
    out = out + coef_p3 * (p ** 3)
    return out
def cp(s, t, p):
    """
    Heat Capacity of Sea Water using UNESCO 1983 polynomial.

    Parameters
    ----------
    s(p) : array_like
        salinity [psu (PSS-78)]
    t(p) : array_like
        temperature [℃ (ITS-90)]
    p : array_like
        pressure [db].

    Returns
    -------
    cp : array_like
        specific heat capacity [J kg :sup:`-1` C :sup:`-1`]

    Examples
    --------
    >>> # Data from Pond and Pickard Intro. Dyn. Oceanography 2nd ed. 1986.
    >>> import seawater as sw
    >>> from seawater.library import T90conv
    >>> t = T90conv([[0, 0, 0, 0, 0, 0],
    ...              [10, 10, 10, 10, 10, 10],
    ...              [20, 20, 20, 20, 20, 20],
    ...              [30, 30, 30, 30, 30, 30],
    ...              [40, 40, 40, 40, 40, 40]])
    >>> s = [[25, 25, 25, 35, 35, 35],
    ...      [25, 25, 25, 35, 35, 35],
    ...      [25, 25, 25, 35, 35, 35],
    ...      [25, 25, 25, 35, 35, 35],
    ...      [25, 25, 25, 35, 35, 35]]
    >>> p = [0, 5000, 10000, 0, 5000, 10000]
    >>> sw.cp(s, t, p)
    array([[ 4048.4405375 ,  3896.25585   ,  3807.7330375 ,  3986.53309476,
             3849.26094605,  3769.11791286],
           [ 4041.8276691 ,  3919.5550066 ,  3842.3111366 ,  3986.34061786,
             3874.72665865,  3804.415624  ],
           [ 4044.8438591 ,  3938.5978466 ,  3866.7400391 ,  3993.85441786,
             3894.99294519,  3828.29059113],
           [ 4049.0984351 ,  3952.0375476 ,  3882.9855526 ,  4000.68382238,
             3909.24271128,  3844.32151784],
           [ 4051.2244911 ,  3966.1132036 ,  3905.9162711 ,  4003.46192541,
             3923.89463092,  3868.28959814]])

    References
    ----------
    .. [1] Fofonoff, P. and Millard, R.C. Jr UNESCO 1983. Algorithms for
       computation of fundamental properties of seawater. UNESCO Tech. Pap. in
       Mar. Sci., No. 44, 53 pp.  Eqn.(31) p.39.
       http://unesdoc.unesco.org/images/0005/000598/059832eb.pdf
    """
    s, t, p = list(map(np.asanyarray, (s, t, p)))
    p = p / 10.  # To convert [db] to [bar] as used in UNESCO routines.
    T68 = T68conv(t)
    # Cp at p=0: Eqn. 26 p.32.
    a = (-7.64357, 0.1072763, -1.38385e-3)
    b = (0.1770383, -4.07718e-3, 5.148e-5)
    c = (4217.4, -3.720283, 0.1412855, -2.654387e-3, 2.093236e-5)
    Cpst0 = ((((c[4] * T68 + c[3]) * T68 + c[2]) * T68 + c[1]) * T68 + c[0] +
             (a[0] + a[1] * T68 + a[2] * T68 ** 2) * s +
             (b[0] + b[1] * T68 + b[2] * T68 ** 2) * s * s ** 0.5)
    # Pressure correction for fresh water: Eqn. 28 p.33.
    a = (-4.9592e-1, 1.45747e-2, -3.13885e-4, 2.0357e-6, 1.7168e-8)
    b = (2.4931e-4, -1.08645e-5, 2.87533e-7, -4.0027e-9, 2.2956e-11)
    c = (-5.422e-8, 2.6380e-9, -6.5637e-11, 6.136e-13)
    del_Cp0t0 = ((((((c[3] * T68 + c[2]) * T68 + c[1]) * T68 + c[0]) * p +
                 ((((b[4] * T68 + b[3]) * T68 + b[2]) * T68 + b[1]) *
                  T68 + b[0])) * p + ((((a[4] * T68 + a[3]) * T68 + a[2]) *
                                      T68 + a[1]) * T68 + a[0])) * p)
    # Salinity-dependent pressure correction: Eqn 29 p.34.
    d = (4.9247e-3, -1.28315e-4, 9.802e-7, 2.5941e-8, -2.9179e-10)
    e = (-1.2331e-4, -1.517e-6, 3.122e-8)
    f = (-2.9558e-6, 1.17054e-7, -2.3905e-9, 1.8448e-11)
    g0 = 9.971e-8
    h = (5.540e-10, -1.7682e-11, 3.513e-13)
    j1 = -1.4300e-12
    # S3_2 is salinity to the 3/2 power, used by the UNESCO polynomials.
    S3_2 = s * s ** 0.5
    del_Cpstp = ((((((d[4] * T68 + d[3]) * T68 + d[2]) * T68 + d[1]) *
                 T68 + d[0]) * s + ((e[2] * T68 + e[1]) * T68 + e[0]) *
                 S3_2) * p +
                 ((((f[3] * T68 + f[2]) * T68 + f[1]) * T68 + f[0]) * s +
                  g0 * S3_2) * p ** 2 + (((h[2] * T68 + h[1]) * T68 + h[0]) *
                                         s + j1 * T68 * S3_2) * p ** 3)
    return Cpst0 + del_Cp0t0 + del_Cpstp
def dens0(s, t):
    """
    Density of Sea Water at atmospheric pressure (0 db gauge).

    Parameters
    ----------
    s(p=0) : array_like
        salinity [psu (PSS-78)]
    t(p=0) : array_like
        temperature [℃ (ITS-90)]

    Returns
    -------
    dens0(s, t) : array_like
        density [kg m :sup:`3`] of salt water with properties
        (s, t, p=0)

    References
    ----------
    .. [1] Fofonoff, P. and Millard, R.C. Jr UNESCO 1983. Algorithms for
       computation of fundamental properties of seawater. UNESCO Tech. Pap.
       in Mar. Sci., No. 44, 53 pp. Eqn.(31) p.39.
    .. [2] Millero, F.J. and Poisson, A. International one-atmosphere
       equation of state of seawater. Deep-Sea Res. 1981. Vol28A(6)
       pp625-629. doi:10.1016/0198-0149(81)90122-9
    """
    s, t = list(map(np.asanyarray, (s, t)))
    T68 = T68conv(t)
    # UNESCO 1983 Eqn.(13) p17: salinity corrections to pure-water density.
    b = (8.24493e-1, -4.0899e-3, 7.6438e-5, -8.2467e-7, 5.3875e-9)
    c = (-5.72466e-3, 1.0227e-4, -1.6546e-6)
    d = 4.8314e-4
    A = b[0] + (b[1] + (b[2] + (b[3] + b[4] * T68) * T68) * T68) * T68
    B = c[0] + (c[1] + c[2] * T68) * T68
    return smow(t) + A * s + B * s * s ** 0.5 + d * s ** 2
def dens(s, t, p):
    """
    Density of Sea Water using UNESCO 1983 (EOS 80) polynomial.

    Parameters
    ----------
    s(p) : array_like
        salinity [psu (PSS-78)]
    t(p) : array_like
        temperature [℃ (ITS-90)]
    p : array_like
        pressure [db]

    Returns
    -------
    dens : array_like
        density [kg m :sup:`3`]

    References
    ----------
    .. [1] Fofonoff, P. and Millard, R.C. Jr UNESCO 1983. Algorithms for
       computation of fundamental properties of seawater. UNESCO Tech. Pap.
       in Mar. Sci., No. 44, 53 pp. Eqn.(31) p.39.
    .. [2] Millero, F.J., Chen, C.T., Bradshaw, A., and Schleicher, K. A new
       high pressure equation of state for seawater. Deap-Sea Research.,
       1980, Vol27A, pp255-264. doi:10.1016/0198-0149(80)90016-3
    """
    s, t, p = list(map(np.asanyarray, (s, t, p)))
    # UNESCO 1983 Eqn. 7 p.15: rho(s,t,p) = rho(s,t,0) / (1 - p/K).
    rho_0 = dens0(s, t)
    K = seck(s, t, p)
    p_bar = p / 10.  # Convert from db to atm pressure units.
    return rho_0 / (1 - p_bar / K)
def dpth(p, lat):
    """Depth in metres from pressure in decibars (UNESCO 1983).

    Parameters
    ----------
    p : array_like
        pressure [db]
    lat : number or array_like
        latitude in decimal degrees north [-90..+90]

    Returns
    -------
    z : array_like
        depth [meters]

    References
    ----------
    .. [1] Fofonoff, P. and Millard, R.C. Jr UNESCO 1983. Algorithms for
       computation of fundamental properties of seawater. UNESCO Tech. Pap.
       in Mar. Sci., No. 44, 53 pp. Eqn. 25 p.26.
       http://unesdoc.unesco.org/images/0005/000598/059832eb.pdf
    """
    p, lat = list(map(np.asanyarray, (p, lat)))
    # UNESCO 1983, Eqn. 25 p.26: a quartic in p over local gravity
    # (surface gravity from latitude plus a mean-pressure correction).
    c0, c1, c2, c3 = 9.72659, -2.2512e-5, 2.279e-10, -1.82e-15
    gam_dash = 2.184e-6
    sin_lat = np.sin(abs(lat) * deg2rad)
    sin2 = sin_lat * sin_lat
    gravity_term = (9.780318 * (1.0 + (5.2788e-3 + 2.36e-5 * sin2) * sin2) +
                    gam_dash * 0.5 * p)
    depth_poly = (((c3 * p + c2) * p + c1) * p + c0) * p
    return depth_poly / gravity_term
def fp(s, p):
    """Freezing point of sea water using the UNESCO 1983 polynomial.

    Parameters
    ----------
    s : array_like
        salinity [psu (PSS-78)]
    p : array_like
        pressure [db]

    Returns
    -------
    fp : array_like
        freezing point temperature [℃ (ITS-90)]

    References
    ----------
    .. [1] Fofonoff, P. and Millard, R.C. Jr UNESCO 1983. Algorithms for
       computation of fundamental properties of seawater. UNESCO Tech. Pap.
       in Mar. Sci., No. 44, 53 pp. Eqn. p.29.
       http://unesdoc.unesco.org/images/0005/000598/059832eb.pdf
    """
    s, p = list(map(np.asanyarray, (s, p)))
    # UNESCO 1983 Eqn. p.29.  NOTE: the db->bar conversion (P = P/10) used by
    # the UNESCO routines is deliberately not applied here.
    a0, a1, a2 = -0.0575, 1.710523e-3, -2.154996e-4
    b = -7.53e-4
    t68_freezing = a0 * s + a1 * s * s ** 0.5 + a2 * s ** 2 + b * p
    return T90conv(t68_freezing)
def g(lat, z=0):
    """Acceleration due to gravity as a function of latitude and height.

    Parameters
    ----------
    lat : array_like
        latitude in decimal degrees north [-90..+90]
    z : number or array_like, default 0
        height in meters (+ve above sea surface, -ve below)

    Returns
    -------
    g : array_like
        gravity [m s :sup:`-2`]

    References
    ----------
    .. [1] Fofonoff, P. and Millard, R.C. Jr UNESCO 1983. Algorithms for
       computation of fundamental properties of seawater. UNESCO Tech. Pap.
       in Mar. Sci., No. 44, 53 pp. Eqn. p.27.
    .. [2] A.E. Gill 1982. p.54 Eqn. 3.7.15 "Atmosphere-Ocean Dynamics"
       Academic Press: New York. ISBN: 0-12-283522-0
    """
    lat, z = list(map(np.asanyarray, (lat, z)))
    # Surface gravity from latitude: UNESCO 1983 Eqn. p.27.
    sin_lat = np.sin(np.abs(lat) * deg2rad)
    sin2 = sin_lat * sin_lat
    surface_gravity = 9.780318 * (1.0 + (5.2788e-3 + 2.36e-5 * sin2) * sin2)
    # Free-air correction with height, from A.E. Gill p.597.
    return surface_gravity / ((1 + z / earth_radius) ** 2)
def pden(s, t, p, pr=0):
    """Potential density of a water mass relative to a reference pressure.

    Equivalent to ``dens(s, ptmp(s, t, p, pr), pr)``.

    Parameters
    ----------
    s(p) : array_like
        salinity [psu (PSS-78)]
    t(p) : array_like
        temperature [℃ (ITS-90)]
    p : array_like
        pressure [db]
    pr : number, default 0
        reference pressure [db]

    Returns
    -------
    pden : array_like
        potential density relative to the ref. pressure [kg m :sup:3]

    References
    ----------
    .. [1] A.E. Gill 1982. p.54 Eqn. 3.7.15 "Atmosphere-Ocean Dynamics"
       Academic Press: New York. ISBN: 0-12-283522-0
    """
    s, t, p, pr = list(map(np.asanyarray, (s, t, p, pr)))
    # Bring the parcel adiabatically to the reference pressure, then
    # evaluate the in-situ density there.
    theta = ptmp(s, t, p, pr)
    return dens(s, theta, pr)
def pres(depth, lat):
    """Pressure in decibars from depth in metres (Saunders, 1981).

    Parameters
    ----------
    depth : array_like
        depth [meters]
    lat : array_like
        latitude in decimal degrees north [-90..+90]

    Returns
    -------
    p : array_like
        pressure [db]

    References
    ----------
    .. [1] Saunders, Peter M., 1981: Practical Conversion of Pressure to
       Depth. J. Phys. Oceanogr., 11, 573-574.
       doi: 10.1175/1520-0485(1981)011<0573:PCOPTD>2.0.CO;2
    """
    depth, lat = list(map(np.asanyarray, (depth, lat)))
    # Latitude-dependent coefficient from Saunders (1981).
    sin_lat = np.sin(np.abs(lat * deg2rad))
    c1 = 5.92e-3 + sin_lat ** 2 * 5.25e-3
    one_minus_c1 = 1 - c1
    # Invert the quadratic depth(pressure) relation.
    return (one_minus_c1 -
            ((one_minus_c1 ** 2) - (8.84e-6 * depth)) ** 0.5) / 4.42e-6
def ptmp(s, t, p, pr=0):
    """
    Calculates potential temperature as per UNESCO 1983 report.

    Parameters
    ----------
    s(p) : array_like
        salinity [psu (PSS-78)]
    t(p) : array_like
        temperature [℃ (ITS-90)]
    p : array_like
        pressure [db].
    pr : array_like
        reference pressure [db], default = 0

    Returns
    -------
    pt : array_like
        potential temperature relative to PR [℃ (ITS-90)]

    Examples
    --------
    >>> import seawater as sw
    >>> from seawater.library import T90conv, T68conv
    >>> t = T90conv([[0, 0, 0, 0, 0, 0],
    ...              [10, 10, 10, 10, 10, 10],
    ...              [20, 20, 20, 20, 20, 20],
    ...              [30, 30, 30, 30, 30, 30],
    ...              [40, 40, 40, 40, 40, 40]])
    >>> s = [[25, 25, 25, 35, 35, 35],
    ...      [25, 25, 25, 35, 35, 35],
    ...      [25, 25, 25, 35, 35, 35],
    ...      [25, 25, 25, 35, 35, 35],
    ...      [25, 25, 25, 35, 35, 35]]
    >>> p = [0, 5000, 10000, 0, 5000, 10000]
    >>> T68conv(sw.ptmp(s, t, p, pr=0))
    array([[  0.        ,  -0.30614418,  -0.96669485,   0.        ,
             -0.3855565 ,  -1.09741136],
           [ 10.        ,   9.35306331,   8.46840949,  10.        ,
              9.29063461,   8.36425752],
           [ 20.        ,  19.04376281,  17.94265   ,  20.        ,
             18.99845171,  17.86536441],
           [ 30.        ,  28.75124632,  27.43529911,  30.        ,
             28.72313484,  27.38506197],
           [ 40.        ,  38.46068173,  36.92544552,  40.        ,
             38.44979906,  36.90231661]])

    References
    ----------
    .. [1] Fofonoff, P. and Millard, R.C. Jr UNESCO 1983. Algorithms for
       computation of fundamental properties of seawater. UNESCO Tech. Pap. in
       Mar. Sci., No. 44, 53 pp. Eqn.(31) p.39.
       http://unesdoc.unesco.org/images/0005/000598/059832eb.pdf
    .. [2] Bryden, H. 1973. New Polynomials for thermal expansion, adiabatic
       temperature gradient and potential temperature of sea water. Deep-Sea
       Res. Vol20,401-408. doi:10.1016/0011-7471(73)90063-6
    """
    s, t, p, pr = list(map(np.asanyarray, (s, t, p, pr)))
    # Integrate dT/dP = adtg(S, T, P) from p to pr with a 4th-order
    # Runge-Kutta scheme.  `th` is carried in IPTS-68 degrees (adtg takes
    # ITS-90, hence the T90conv on each intermediate temperature); `q`
    # accumulates the RK correction term between stages.
    # Theta1.
    del_P = pr - p
    del_th = del_P * adtg(s, t, p)
    th = T68conv(t) + 0.5 * del_th
    q = del_th
    # Theta2.
    del_th = del_P * adtg(s, T90conv(th), p + 0.5 * del_P)
    th = th + (1 - 1 / 2 ** 0.5) * (del_th - q)
    q = (2 - 2 ** 0.5) * del_th + (-2 + 3 / 2 ** 0.5) * q
    # Theta3.
    del_th = del_P * adtg(s, T90conv(th), p + 0.5 * del_P)
    th = th + (1 + 1 / 2 ** 0.5) * (del_th - q)
    q = (2 + 2 ** 0.5) * del_th + (-2 - 3 / 2 ** 0.5) * q
    # Theta4.
    del_th = del_P * adtg(s, T90conv(th), p + del_P)
    # Final combination of stages; convert back to ITS-90 for the caller.
    return T90conv(th + (del_th - 2 * q) / 6)
def salt(r, t, p):
    """Salinity from conductivity ratio, UNESCO 1983 polynomial.

    Parameters
    ----------
    r : array_like
        conductivity ratio :math:`R = \\frac{C(S,T,P)}{C(35,15(IPTS-68),0)}`
    t : array_like
        temperature [℃ (ITS-90)]
    p : array_like
        pressure [db]

    Returns
    -------
    s : array_like
        salinity [psu (PSS-78)]

    References
    ----------
    .. [1] Fofonoff, P. and Millard, R.C. Jr UNESCO 1983. Algorithms for
       computation of fundamental properties of seawater. UNESCO Tech. Pap.
       in Mar. Sci., No. 44, 53 pp.
       http://unesdoc.unesco.org/images/0005/000598/059832eb.pdf
    """
    r, t, p = list(map(np.asanyarray, (r, t, p)))
    # Split the measured ratio into its temperature (rt) and pressure (rp)
    # contributions, then evaluate the salinity polynomial on the remainder.
    temp_ratio = salrt(t)
    pressure_ratio = salrp(r, t, p)
    reduced_ratio = r / (pressure_ratio * temp_ratio)
    return sals(reduced_ratio, t)
def svel(s, t, p):
    """
    Sound Velocity in sea water using UNESCO 1983 polynomial.

    Parameters
    ----------
    s(p) : array_like
        salinity [psu (PSS-78)]
    t(p) : array_like
        temperature [℃ (ITS-90)]
    p : array_like
        pressure [db].

    Returns
    -------
    svel : array_like
        sound velocity [m/s]

    Examples
    --------
    Data from Pond and Pickard Intro. Dynamical Oceanography 2nd ed. 1986

    >>> import seawater as sw
    >>> from seawater.library import T90conv
    >>> t = T90conv([[ 0,  0,  0,  0,  0,  0],
    ...              [10, 10, 10, 10, 10, 10],
    ...              [20, 20, 20, 20, 20, 20],
    ...              [30, 30, 30, 30, 30, 30],
    ...              [40, 40, 40, 40, 40, 40]])
    >>> s = [[ 25, 25, 25, 35, 35, 35],
    ...      [ 25, 25, 25, 35, 35, 35],
    ...      [ 25, 25, 25, 35, 35, 35],
    ...      [ 25, 25, 25, 35, 35, 35],
    ...      [ 25, 25, 25, 35, 35, 35]]
    >>> p = [ 0, 5000, 10000, 0, 5000, 10000]
    >>> sw.svel(s, t, p)
    array([[ 1435.789875  ,  1520.358725  ,  1610.4074    ,  1449.13882813,
             1533.96863705,  1623.15007097],
           [ 1477.68316464,  1561.30635914,  1647.39267114,  1489.82233602,
             1573.40946928,  1658.99115504],
           [ 1510.31388348,  1593.59671798,  1676.80967748,  1521.4619731 ,
             1604.4762822 ,  1687.18305631],
           [ 1535.21434752,  1618.95631952,  1700.60547902,  1545.59485539,
             1628.97322783,  1710.06294277],
           [ 1553.44506636,  1638.02522336,  1719.15088536,  1563.20925247,
             1647.29949576,  1727.83176404]])

    References
    ----------
    .. [1] Fofonoff, P. and Millard, R.C. Jr UNESCO 1983. Algorithms for
       computation of fundamental properties of seawater. UNESCO Tech. Pap. in
       Mar. Sci., No. 44, 53 pp. Eqn.(31) p.39.
       http://unesdoc.unesco.org/images/0005/000598/059832eb.pdf
    """
    s, t, p = list(map(np.asanyarray, (s, t, p)))
    # UNESCO 1983. Eqn..33 p.46: c(S,T,P) = Cw + A*S + B*S^(3/2) + D*S^2,
    # with Cw, A, B, D each a polynomial in T68 and pressure (bars).
    p = p / 10  # Convert db to bars as used in UNESCO routines.
    T68 = T68conv(t)
    # Eqn 34 p.46: pure-water term Cw(T, P).
    c00, c01, c02, c03, c04, c05 = (1402.388, 5.03711, -5.80852e-2, 3.3420e-4,
                                    -1.47800e-6, 3.1464e-9)
    c10, c11, c12, c13, c14 = (0.153563, 6.8982e-4, -8.1788e-6, 1.3621e-7,
                               -6.1185e-10)
    c20, c21, c22, c23, c24 = (3.1260e-5, -1.7107e-6, 2.5974e-8, -2.5335e-10,
                               1.0405e-12)
    c30, c31, c32 = (-9.7729e-9, 3.8504e-10, -2.3643e-12)
    # Nested Horner evaluation: inner polynomials in T68, outer in p.
    Cw = (((((c32 * T68 + c31) * T68 + c30) * p +
            ((((c24 * T68 + c23) * T68 + c22) * T68 + c21) * T68 + c20)) * p +
           ((((c14 * T68 + c13) * T68 + c12) * T68 + c11) * T68 + c10)) *
          p + ((((c05 * T68 + c04) * T68 + c03) * T68 + c02) * T68 + c01) *
          T68 + c00)
    # Eqn. 35. p.47: coefficient A(T, P) of the S term.
    a00, a01, a02, a03, a04 = (1.389, -1.262e-2, 7.164e-5, 2.006e-6, -3.21e-8)
    a10, a11, a12, a13, a14 = (9.4742e-5, -1.2580e-5, -6.4885e-8, 1.0507e-8,
                               -2.0122e-10)
    a20, a21, a22, a23 = (-3.9064e-7, 9.1041e-9, -1.6002e-10, 7.988e-12)
    a30, a31, a32 = (1.100e-10, 6.649e-12, -3.389e-13)
    A = (((((a32 * T68 + a31) * T68 + a30) * p +
           (((a23 * T68 + a22) * T68 + a21) * T68 + a20)) * p +
          ((((a14 * T68 + a13) * T68 + a12) * T68 + a11) * T68 + a10)) * p +
         (((a04 * T68 + a03) * T68 + a02) * T68 + a01) * T68 + a00)
    # Eqn 36 p.47: coefficient B(T, P) of the S^(3/2) term.
    b00, b01, b10, b11 = -1.922e-2, -4.42e-5, 7.3637e-5, 1.7945e-7
    B = b00 + b01 * T68 + (b10 + b11 * T68) * p
    # Eqn 37 p.47: coefficient D(P) of the S^2 term.
    d00, d10 = 1.727e-3, -7.9836e-6
    D = d00 + d10 * p
    # Eqn 33 p.46: combine the terms.
    return Cw + A * s + B * s * s ** 0.5 + D * s ** 2
def temp(s, pt, p, pr=0):
    """In-situ temperature from potential temperature.

    Inverts :func:`ptmp`: given the potential temperature referenced to
    ``pr``, return the temperature at the in-situ pressure ``p``.

    Parameters
    ----------
    s(p) : array_like
        salinity [psu (PSS-78)]
    pt(p) : array_like
        potential temperature [℃ (ITS-90)]
    p : array_like
        pressure [db]
    pr : array_like
        reference pressure [db]

    Returns
    -------
    temp : array_like
        temperature [℃ (ITS-90)]

    References
    ----------
    .. [1] Fofonoff, P. and Millard, R.C. Jr UNESCO 1983. Algorithms for
       computation of fundamental properties of seawater. UNESCO Tech. Pap.
       in Mar. Sci., No. 44, 53 pp.
    .. [2] Bryden, H. 1973. New Polynomials for thermal expansion, adiabatic
       temperature gradient and potential temperature of sea water. Deep-Sea
       Res. Vol20,401-408. doi:10.1016/0011-7471(73)90063-6
    """
    s, pt, p, pr = list(map(np.asanyarray, (s, pt, p, pr)))
    # The adiabatic transform is its own inverse: swap the two pressures.
    return ptmp(s, pt, pr, p)
| mit |
TravisCG/SI_scripts | deepsnvres2vcf.py | 1 | 4185 | #!/usr/bin/python
"""
Read the deepSNV res (the concatenated one, where the sample name is the first column)
And convert it to VCF, to feed it into Vagrent
"""
import sys
import math
import decimal
class Vari:
    """One variant call: position, alleles and per-row statistics.

    The statistic attributes are lists so that adjacent indel rows can be
    merged into a single record; getStr() averages them for VCF output.
    """

    def __init__(self):
        # Coordinates and alleles ("-" marks an indel gap character).
        self.chr = ""
        self.pos = 0
        self.ref = ""
        self.alt = ""
        # Per-row statistics collected from the deepSNV table.
        self.pval = list()
        self.VAF = list()
        self.cov = list()
        self.FDR = list()

    def getStr(self):
        """Return the sample column "PVAL:DP:FDR:VAF" with averaged values."""
        def mean(values):
            return str(sum(values) / len(values))
        return "%s:%s:%s:%s" % (mean(self.pval), mean(self.cov),
                                mean(self.FDR), mean(self.VAF))
# --- Minimal CLI parsing: -r <reference fasta>, -b <deepSNV result table> ---
# NOTE(review): if either flag is missing, refname/bigname stay undefined and
# the script later fails with a NameError.
for i in range(len(sys.argv)):
    if sys.argv[i] == "-r":
        refname = sys.argv[i+1]
    if sys.argv[i] == "-b":
        bigname = sys.argv[i+1]

# Print header
print "##fileformat=VCFv4.1"
print '##FORMAT=<ID=PVAL,Number=1,Type=Float,Description="Raw p-value">'
print '##FORMAT=<ID=DP,Number=1,Type=Integer,Description="Coverage">'
print '##FORMAT=<ID=FDR,Number=1,Type=Float,Description="False discovery rate">'
print '##FORMAT=<ID=VAF,Number=1,Type=Float,Description="Variant allele frequency">'

# Load the reference FASTA into memory: sequence name -> full sequence string.
reference = dict()
for i in open(refname):
    if i.startswith(">"):
        header = i.rstrip()[1:].split()[0]
        reference[header] = list()
    else:
        reference[header].append(i.rstrip())
for i in reference:
    reference[i] = "".join(reference[i])

# Parse the concatenated deepSNV result table (first column = sample name),
# grouping the Vari records per sample.
storage = dict()
bigtable = open(bigname)
bigtable.readline()  # skip the column-header line
for i in bigtable:
    fields = i.rstrip().split()
    var = Vari()
    sampleid = fields[0]
    var.chr = fields[1]
    var.pos = int(fields[2]) - 1  # table is 1-based; keep 0-based internally
    var.ref = fields[3]
    var.alt = fields[4]
    var.FDR.append(decimal.Decimal(fields[5]))
    # fields[8]/[10] are summed as variant counts and fields[9]/[11] as
    # coverages of the two strands -- presumably forward/reverse;
    # TODO confirm against the deepSNV output column layout.
    a = decimal.Decimal(fields[8])
    b = decimal.Decimal(fields[9])
    c = decimal.Decimal(fields[10])
    d = decimal.Decimal(fields[11])
    vaf = (a + c) / (b + d)
    var.VAF.append( vaf )
    var.cov.append( int(fields[9]) + int(fields[11]) )
    var.pval.append(decimal.Decimal(fields[17]))
    # If ref/alt look swapped relative to the reference sequence, swap back.
    if var.ref != "-" and var.alt != "-" and var.ref != reference[var.chr][var.pos] and var.alt == reference[var.chr][var.pos]:
        var.ref = fields[4]
        var.alt = fields[3]
    if sampleid not in storage:
        storage[sampleid] = list()
    storage[sampleid].append(var)

body = dict()
samples = storage.keys()
print "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t" + "\t".join(samples)

for sampleid in samples:
    # Sort each sample's calls by position so consecutive gap ("-") rows can
    # be merged into single multi-base insertion/deletion records.
    s = sorted(storage[sampleid], key = lambda x: x.pos)
    prevpos = 0
    tmp = dict()
    for i in s:
        if i.pos - prevpos == 1 and (i.ref == "-" or i.alt == "-") and ("-" in tmp[storepos].ref or "-" in tmp[storepos].alt):
            # Directly follows the previous indel row: extend that record.
            tmp[storepos].ref += i.ref
            tmp[storepos].alt += i.alt
            tmp[storepos].pval += i.pval
            tmp[storepos].VAF += i.VAF
            tmp[storepos].cov += i.cov
            tmp[storepos].FDR += i.FDR
        else:
            # Start a new record anchored at this position.
            storepos = i.pos
            tmp[storepos] = Vari()
            tmp[storepos].chr = i.chr
            tmp[storepos].pos = storepos
            tmp[storepos].ref = i.ref
            tmp[storepos].alt = i.alt
            tmp[storepos].pval = i.pval
            tmp[storepos].VAF = i.VAF
            tmp[storepos].cov = i.cov
            tmp[storepos].FDR = i.FDR
        prevpos = i.pos
    for i in tmp:
        # Rewrite "-" gap alleles as VCF-style left-anchored indels using the
        # reference base immediately before the event.
        if tmp[i].ref.startswith("-"):
            # insertion
            tmp[i].pos -= 1
            tmp[i].ref = reference[tmp[i].chr][tmp[i].pos]
            tmp[i].alt = tmp[i].ref + tmp[i].alt
        elif tmp[i].alt.startswith("-"):
            # deletion
            tmp[i].pos -= 1
            tmp[i].alt = reference[tmp[i].chr][tmp[i].pos]
            tmp[i].ref = tmp[i].alt + tmp[i].ref
        key = tmp[i].chr + "_" + str(tmp[i].pos) + "_" + tmp[i].ref + "_" + tmp[i].alt
        if key not in body:
            body[key] = dict()
        body[key][sampleid] = tmp[i].getStr()
#        try:
#            body[key][sampleid] = int(tmp[i].pval.log10() * decimal.Decimal('-10'))
#        except OverflowError:
#            body[key][sampleid] = 1000

# Emit records sorted by chromosome then numeric position (back to 1-based).
sr = sorted( [x.split("_") for x in body.keys()], key = lambda x: (x[0], int(x[1])) )
for k in sr:
    chrx, pos, ref, alt = k
    pos = str(int(pos) + 1)
    key = "_".join(k)
    out = list()
    out.append(chrx)
    out.append(pos)
    out.append(".")
    out.append(ref)
    out.append(alt)
    out.append(".")
    out.append("PASS")
    out.append(".")
    out.append("PVAL:DP:FDR:VAF")
    for s in samples:
        if s in body[key]:
            out.append(body[key][s])
        else:
            out.append(".")
    print "\t".join(out)
| gpl-3.0 |
digimarc/django | django/contrib/auth/admin.py | 11 | 8644 | from django.conf import settings
from django.conf.urls import url
from django.contrib import admin, messages
from django.contrib.admin.options import IS_POPUP_VAR
from django.contrib.admin.utils import unquote
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.forms import (
AdminPasswordChangeForm, UserChangeForm, UserCreationForm,
)
from django.contrib.auth.models import Group, User
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.db import transaction
from django.http import Http404, HttpResponseRedirect
from django.template.response import TemplateResponse
from django.utils.decorators import method_decorator
from django.utils.encoding import force_text
from django.utils.html import escape
from django.utils.translation import ugettext, ugettext_lazy as _
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
# Method-decorator forms of the view decorators so they can be applied to
# ModelAdmin methods (which take ``self`` before ``request``).
csrf_protect_m = method_decorator(csrf_protect)
sensitive_post_parameters_m = method_decorator(sensitive_post_parameters())
class GroupAdmin(admin.ModelAdmin):
    """Admin options for django.contrib.auth's Group model."""
    search_fields = ('name',)
    ordering = ('name',)
    filter_horizontal = ('permissions',)

    def formfield_for_manytomany(self, db_field, request=None, **kwargs):
        # Rendering permission names triggers a content_type load per row;
        # select_related avoids that major performance hit.
        if db_field.name == 'permissions':
            base_qs = kwargs.get('queryset', db_field.rel.to.objects)
            kwargs['queryset'] = base_qs.select_related('content_type')
        return super(GroupAdmin, self).formfield_for_manytomany(
            db_field, request=request, **kwargs)
class UserAdmin(admin.ModelAdmin):
    """Admin options for the default django.contrib.auth User model."""
    add_form_template = 'admin/auth/user/add_form.html'
    change_user_password_template = None
    fieldsets = (
        (None, {'fields': ('username', 'password')}),
        (_('Personal info'), {'fields': ('first_name', 'last_name', 'email')}),
        (_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
                                       'groups', 'user_permissions')}),
        (_('Important dates'), {'fields': ('last_login', 'date_joined')}),
    )
    add_fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': ('username', 'password1', 'password2'),
        }),
    )
    form = UserChangeForm
    add_form = UserCreationForm
    change_password_form = AdminPasswordChangeForm
    list_display = ('username', 'email', 'first_name', 'last_name', 'is_staff')
    list_filter = ('is_staff', 'is_superuser', 'is_active', 'groups')
    search_fields = ('username', 'first_name', 'last_name', 'email')
    ordering = ('username',)
    filter_horizontal = ('groups', 'user_permissions',)

    def get_fieldsets(self, request, obj=None):
        # The add form only shows username + passwords; the full fieldsets
        # are used once the user exists.
        if not obj:
            return self.add_fieldsets
        return super(UserAdmin, self).get_fieldsets(request, obj)

    def get_form(self, request, obj=None, **kwargs):
        """
        Use special form during user creation
        """
        defaults = {}
        if obj is None:
            defaults['form'] = self.add_form
        defaults.update(kwargs)
        return super(UserAdmin, self).get_form(request, obj, **defaults)

    def get_urls(self):
        # Extra admin view for changing a user's password.
        return [
            url(r'^(.+)/password/$', self.admin_site.admin_view(self.user_change_password), name='auth_user_password_change'),
        ] + super(UserAdmin, self).get_urls()

    def lookup_allowed(self, lookup, value):
        # See #20078: we don't want to allow any lookups involving passwords.
        if lookup.startswith('password'):
            return False
        return super(UserAdmin, self).lookup_allowed(lookup, value)

    @sensitive_post_parameters_m
    @csrf_protect_m
    @transaction.atomic
    def add_view(self, request, form_url='', extra_context=None):
        # It's an error for a user to have add permission but NOT change
        # permission for users. If we allowed such users to add users, they
        # could create superusers, which would mean they would essentially have
        # the permission to change users. To avoid the problem entirely, we
        # disallow users from adding users if they don't have change
        # permission.
        if not self.has_change_permission(request):
            if self.has_add_permission(request) and settings.DEBUG:
                # Raise Http404 in debug mode so that the user gets a helpful
                # error message.
                raise Http404(
                    'Your user does not have the "Change user" permission. In '
                    'order to add users, Django requires that your user '
                    'account have both the "Add user" and "Change user" '
                    'permissions set.')
            raise PermissionDenied
        if extra_context is None:
            extra_context = {}
        username_field = self.model._meta.get_field(self.model.USERNAME_FIELD)
        defaults = {
            'auto_populated_fields': (),
            'username_help_text': username_field.help_text,
        }
        extra_context.update(defaults)
        return super(UserAdmin, self).add_view(request, form_url,
                                               extra_context)

    @sensitive_post_parameters_m
    def user_change_password(self, request, id, form_url=''):
        # Admin view that lets a staff member set another user's password.
        if not self.has_change_permission(request):
            raise PermissionDenied
        user = self.get_object(request, unquote(id))
        if user is None:
            raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {
                'name': force_text(self.model._meta.verbose_name),
                'key': escape(id),
            })
        if request.method == 'POST':
            form = self.change_password_form(user, request.POST)
            if form.is_valid():
                form.save()
                change_message = self.construct_change_message(request, form, None)
                self.log_change(request, user, change_message)
                msg = ugettext('Password changed successfully.')
                messages.success(request, msg)
                # Keep the admin's own session valid if they changed their
                # own password.
                update_session_auth_hash(request, form.user)
                return HttpResponseRedirect(
                    reverse(
                        '%s:auth_%s_change' % (
                            self.admin_site.name,
                            user._meta.model_name,
                        ),
                        args=(user.pk,),
                    )
                )
        else:
            form = self.change_password_form(user)
        # Invalid POSTs fall through and re-render the form with errors.
        fieldsets = [(None, {'fields': list(form.base_fields)})]
        adminForm = admin.helpers.AdminForm(form, fieldsets, {})
        context = {
            'title': _('Change password: %s') % escape(user.get_username()),
            'adminForm': adminForm,
            'form_url': form_url,
            'form': form,
            'is_popup': (IS_POPUP_VAR in request.POST or
                         IS_POPUP_VAR in request.GET),
            'add': True,
            'change': False,
            'has_delete_permission': False,
            'has_change_permission': True,
            'has_absolute_url': False,
            'opts': self.model._meta,
            'original': user,
            'save_as': False,
            'show_save': True,
        }
        context.update(admin.site.each_context(request))
        request.current_app = self.admin_site.name
        return TemplateResponse(request,
                                self.change_user_password_template or
                                'admin/auth/user/change_password.html',
                                context)

    def response_add(self, request, obj, post_url_continue=None):
        """
        Determines the HttpResponse for the add_view stage. It mostly defers to
        its superclass implementation but is customized because the User model
        has a slightly different workflow.
        """
        # We should allow further modification of the user just added i.e. the
        # 'Save' button should behave like the 'Save and continue editing'
        # button except in two scenarios:
        # * The user has pressed the 'Save and add another' button
        # * We are adding a user in a popup
        if '_addanother' not in request.POST and IS_POPUP_VAR not in request.POST:
            request.POST['_continue'] = 1
        return super(UserAdmin, self).response_add(request, obj,
                                                   post_url_continue)
# Register the stock auth models with the default admin site.
admin.site.register(Group, GroupAdmin)
admin.site.register(User, UserAdmin)
| bsd-3-clause |
toastdriven/eliteracing | courses/migrations/0002_initial_courses.py | 1 | 5357 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import os
from django.db import migrations, models
def create_course(Commander, Course, data):
    """Get or create the Commander and base Course described by ``data``.

    ``Commander`` and ``Course`` are historical model classes from the
    migration's app registry; ``data`` is one record from a JSON fixture.
    Returns the Course instance (created or pre-existing).
    """
    commander, _ = Commander.objects.get_or_create(
        name=data['created_by']
    )

    course_defaults = {
        'system': data['system'],
        'course_type': data['course_type'],
        'nearby_outfitting': data['nearby_outfitting'],
        'distance_from_primary': data['distance_from_primary'],
        'distance_from_sol': data['distance_from_sol'],
        'notes': data['notes'],
        'created_by': commander,
    }
    course, _ = Course.objects.get_or_create(
        title=data['title'],
        defaults=course_defaults,
    )
    return course
def create_initial_courses(apps, schema_editor):
    """Load the bundled JSON fixtures and create every course type.

    Replaces five near-identical copy-pasted stanzas with one data-driven
    loop: each entry below names the historical model, its fixture file and
    the ``course_info`` keys to copy into the model's defaults. For every
    fixture record the base Course (and its Commander) is created first via
    ``create_course``, then the type-specific row is get_or_create'd, so the
    migration stays idempotent.
    """
    Commander = apps.get_model("cmdrs", "Commander")
    Course = apps.get_model("courses", "Course")

    data_dir = os.path.join(
        os.path.dirname(os.path.dirname(__file__)),
        'data'
    )

    # (historical model name, fixture filename, course_info keys to copy)
    specs = (
        ("ZeroGravityCourse", 'zero_gravity.json',
         ("vehicle_type", "station_name", "number_of_rings", "length")),
        ("SurfaceCourse", 'surface.json',
         ("vehicle_type", "planet_name", "coordinates", "gravity")),
        ("SRVRallyCourse", 'srvrally.json',
         ("vehicle_type", "planet_name", "length", "start_port_name",
          "end_port_name", "starting_line", "finish_line", "gravity",
          "planet_type")),
        ("SRVCrossCourse", 'srvcross.json',
         ("vehicle_type", "planet_name", "port_name", "gravity",
          "tidally_locked")),
        ("StadiumCourse", 'stadium.json',
         ("vehicle_type", "planet_name", "port_name", "gravity")),
    )

    for model_name, filename, info_keys in specs:
        model = apps.get_model("courses", model_name)
        with open(os.path.join(data_dir, filename)) as raw_data:
            records = json.load(raw_data)

        for data in records:
            course = create_course(Commander, Course, data)
            model.objects.get_or_create(
                course=course,
                defaults={key: data['course_info'][key] for key in info_keys},
            )
class Migration(migrations.Migration):
    # Data migration: loads the bundled JSON fixtures into the course tables.
    dependencies = [
        ('cmdrs', '0001_initial'),
        ('courses', '0001_initial'),
    ]
    operations = [
        # No reverse operation is supplied; re-running is safe because the
        # loader uses get_or_create throughout.
        migrations.RunPython(create_initial_courses),
    ] | bsd-3-clause |
mbiciunas/nix | src/cli_nix/cli_tags.py | 1 | 1531 | # Nix
# Copyright (c) 2017 Mark Biciunas.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import logging
from config.tag.list_tag import ListTag
# Module-level logger named after this module (supports hierarchical config).
LOG = logging.getLogger(__name__)
def add_subparser(subparsers: argparse._SubParsersAction):
    """Register the ``tags`` sub-command for listing all available tags.

    :param subparsers: Object that will contain the argument definitions.
    :type subparsers: ArgumentParser
    """
    LOG.debug("Define a cli parser for running scripts")

    tags_parser = subparsers.add_parser('tags',
                                        help='List all tags.')
    tags_parser.set_defaults(func=_process)
def _process(args):
    """Handle the ``tags`` sub-command by printing every available tag.

    :param args: Command line arguments
    :type args: Namespace
    """
    LOG.info("Begin action to list the tags")

    ListTag().list()
| gpl-3.0 |
enzochiau/bitbucketcli | bitbucket/groups.py | 1 | 11015 | # Copyright (c) 2013 Yogesh Panchal, yspanchal@gmail.com
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import imp
import json
import logging
import requests
import prettytable
from os.path import expanduser
from cliff.command import Command
# Load Bitbucket credentials from ~/.bitbucket.py, which is expected to
# define ``username`` and ``passwd``.
try:
    home = expanduser("~")
    filename = os.path.join(home, '.bitbucket.py')
    creds = imp.load_source('.bitbucket', filename)
    user = creds.username
    passwd = creds.passwd
except (IOError, NameError):
    # NOTE(review): failure is silently ignored -- if the file is missing or
    # malformed, ``user``/``passwd`` stay undefined and every command fails
    # later with a NameError instead of a clear message.
    pass
class Groups(Command):
    """
    * Get list groups & respective members
    """
    log = logging.getLogger(__name__ + '.Groups')
    # Silence per-request chatter from the requests library.
    requests_log = logging.getLogger("requests")
    requests_log.setLevel(logging.WARNING)

    def get_parser(self, prog_name):
        # --account is mandatory; --name is accepted but currently unused.
        parser = super(Groups, self).get_parser(prog_name)
        parser.add_argument(
            '--account',
            '-a',
            required=True,
            metavar='<account>',
            help='The account name')
        parser.add_argument(
            '--name',
            '-n',
            metavar='<group_name>',
            help='The group name')
        return parser

    def take_action(self, parsed_args):
        self.log.debug('take_action({a})'.format(a=parsed_args))
        # Bitbucket 1.0 REST endpoint listing the account's groups.
        url = ("https://bitbucket.org/api/1.0/"
               "groups/{a.account}/").format(a=parsed_args)
        r = requests.get(url, auth=(user, passwd))
        if r.status_code == 200:
            data = json.loads(r.text)
            if len(data) != 0:
                # One table per group: the group name in the first row,
                # then one row per member username.
                for group in data:
                    newdata = prettytable.PrettyTable(["Group Name", "Members"])
                    newdata.padding_width = 1
                    newdata.add_row([group['name'], ""])
                    for member in group['members']:
                        newdata.add_row(["", member['username']])
                    print(newdata)
                sys.exit(0)
            else:
                print("\n No groups found.\n")
        else:
            self.app.stdout.write(
                '\n Error: ' + '"' + str(r.status_code) + '"' +
                ' Invalid request, Invalid Account name ' + '"' +
                parsed_args.account + '\n\n')
            sys.exit(1)
class Creategroup(Command):
    """
    * Create new group
    """
    log = logging.getLogger(__name__ + '.Creategroup')
    # Silence per-request chatter from the requests library.
    requests_log = logging.getLogger("requests")
    requests_log.setLevel(logging.WARNING)

    def get_parser(self, prog_name):
        parser = super(Creategroup, self).get_parser(prog_name)
        parser.add_argument(
            '--account',
            '-a',
            required=True,
            metavar='<account>',
            help='The account name')
        parser.add_argument(
            '--name',
            '-n',
            required=True,
            metavar='<group name>',
            help='The group name')
        parser.add_argument(
            '--permission',
            '-p',
            required=True,
            metavar='<repo_permission>',
            choices=[
                'read',
                'write',
                'admin'],
            help='The group name')
        parser.add_argument(
            '--autoadd',
            '-A',
            metavar='<auto_add>',
            choices=[
                'true',
                'false'],
            help='Auto add')
        return parser

    def take_action(self, parsed_args):
        self.log.debug('take_action({a})'.format(a=parsed_args))
        # Bitbucket 1.0 REST endpoint for creating a group under the account.
        url = ("https://bitbucket.org/api/1.0/"
               "groups/{a.account}/").format(a=parsed_args)
        args = {}
        args['name'] = parsed_args.name
        args['permission'] = parsed_args.permission
        if parsed_args.autoadd:
            args['auto_add'] = parsed_args.autoadd
        r = requests.post(url, data=args, auth=(user, passwd))
        if r.status_code == 200:
            data = json.loads(r.text)
            msg = """
            New group created."
            Group Name: {d[name]}
            Group Owner: {d[owner][username]}
            Group Permission: {d[permission]}
            """
            print(msg.format(d=data))
            sys.exit(0)
        elif r.status_code == 400:
            # 400 means a group with the requested name already exists.
            msg = """
            Error: {r.status_code} Bad request."
            A group with name '{a.name}' already exists.
            """
            print(msg.format(r=r, a=parsed_args))
            sys.exit(1)
        else:
            msg = """
            Error: "{r.status_code}" Invalid request, Invalid Account name "{a.account}"
            """
            self.app.stdout.write(msg.format(r=r, a=parsed_args))
            sys.exit(1)
class Deletegroup(Command):
    """
    * Delete existing group
    """
    log = logging.getLogger(__name__ + '.Deletegroup')
    requests_log = logging.getLogger("requests")
    requests_log.setLevel(logging.WARNING)

    def get_parser(self, prog_name):
        """Both the account and the group name are mandatory."""
        parser = super(Deletegroup, self).get_parser(prog_name)
        for flags, meta, helptext in (
                (('--account', '-a'), '<account>', 'The account name'),
                (('--name', '-n'), '<group name>', 'The group name')):
            parser.add_argument(
                flags[0], flags[1],
                required=True,
                metavar=meta,
                help=helptext)
        return parser

    def take_action(self, parsed_args):
        """DELETE the group; 204 means success (exit 0), anything else exits 1."""
        self.log.debug('take_action({a})'.format(a=parsed_args))
        endpoint = ("https://bitbucket.org/api/1.0/"
                    "groups/{a.account}/{a.name}/").format(a=parsed_args)
        response = requests.delete(endpoint, auth=(user, passwd))
        if response.status_code != 204:
            msg = """
 Error: "{r.status_code}" Invalid request
 """
            self.app.stdout.write(msg.format(r=response))
            sys.exit(1)
        print("\n Group '{a.name}' deleted.\n".format(a=parsed_args))
        sys.exit(0)
class Groupmembers(Command):
    """
    * Get members for group
    """
    log = logging.getLogger(__name__ + '.Groupmembers')
    requests_log = logging.getLogger("requests")
    requests_log.setLevel(logging.WARNING)

    def get_parser(self, prog_name):
        """Both the account and the group name are mandatory."""
        parser = super(Groupmembers, self).get_parser(prog_name)
        parser.add_argument(
            '--account', '-a',
            required=True,
            metavar='<account>',
            help='The account name')
        parser.add_argument(
            '--name', '-n',
            required=True,
            metavar='<group name>',
            help='The group name')
        return parser

    def take_action(self, parsed_args):
        """Print a one-column table of the group's member usernames."""
        self.log.debug('take_action({a})'.format(a=parsed_args))
        endpoint = ("https://bitbucket.org/api/1.0/"
                    "groups/{a.account}/{a.name}/members/").format(a=parsed_args)
        response = requests.get(endpoint, auth=(user, passwd))
        if response.status_code != 200:
            self.app.stdout.write(
                '\n Error: ' + '"' + str(response.status_code) + '"' +
                ' Invalid request \n\n')
            sys.exit(1)
        members = json.loads(response.text)
        print("\n Group Name: {a.name}".format(a=parsed_args))
        table = prettytable.PrettyTable()
        table.padding_width = 1
        table.add_column("Members", [entry['username'] for entry in members])
        print(table)
class Addgroupmember(Command):
    """
    * Add new member in group
    """
    # Per-command logger; requests' own logging is throttled to warnings.
    log = logging.getLogger(__name__ + '.Addgroupmember')
    requests_log = logging.getLogger("requests")
    requests_log.setLevel(logging.WARNING)

    def get_parser(self, prog_name):
        # All three identifiers (account, group, member) are mandatory.
        parser = super(Addgroupmember, self).get_parser(prog_name)
        parser.add_argument(
            '--account',
            '-a',
            required=True,
            metavar='<account>',
            help='The account name')
        parser.add_argument(
            '--name',
            '-n',
            required=True,
            metavar='<group_name>',
            help='The group name')
        parser.add_argument(
            '--member',
            '-m',
            required=True,
            metavar='<member_account>',
            help='The member name')
        return parser

    def take_action(self, parsed_args):
        # PUT to the member URL adds the user to the group.
        self.log.debug('take_action({a})'.format(a=parsed_args))
        url = ("https://bitbucket.org/api/1.0/"
               "groups/{a.account}/{a.name}/"
               "members/{a.member}/").format(a=parsed_args)
        r = requests.put(url, auth=(user, passwd))
        if r.status_code == 200:
            msg = """
 User '{a.member}' added to group '{a.name}'
 """
            print(msg.format(a=parsed_args))
            sys.exit(0)
        elif r.status_code == 409:
            # 409 Conflict: the user is already a member of the group.
            msg = """
 'Conflict/Duplicate' User '{a.member}' present in group
 """
            print(msg.format(a=parsed_args))
            sys.exit(1)
        else:
            msg = """
 Error: "{r.status_code}" Invalid request
 """
            self.app.stdout.write(msg.format(r=r))
            sys.exit(1)
class Deletegroupmember(Command):
    """
    * Delete member from group
    """
    log = logging.getLogger(__name__ + '.Deletegroupmember')
    requests_log = logging.getLogger("requests")
    requests_log.setLevel(logging.WARNING)

    def get_parser(self, prog_name):
        """Account, group and member are all mandatory arguments."""
        parser = super(Deletegroupmember, self).get_parser(prog_name)
        for flags, meta, helptext in (
                (('--account', '-a'), '<account>', 'The account name'),
                (('--name', '-n'), '<group_name>', 'The group name'),
                (('--member', '-m'), '<member_account>', 'The member name')):
            parser.add_argument(
                flags[0], flags[1],
                required=True,
                metavar=meta,
                help=helptext)
        return parser

    def take_action(self, parsed_args):
        """DELETE the member URL; 204 is success (exit 0), else exit 1."""
        self.log.debug('take_action({a})'.format(a=parsed_args))
        endpoint = ("https://bitbucket.org/api/1.0/"
                    "groups/{a.account}/{a.name}/"
                    "members/{a.member}/").format(a=parsed_args)
        response = requests.delete(endpoint, auth=(user, passwd))
        if response.status_code != 204:
            msg = """
 Error: "{r.status_code}" Invalid request
 """
            self.app.stdout.write(msg.format(r=response))
            sys.exit(1)
        msg = """
 User '{a.member}' removed from group '{a.name}'
 """
        print(msg.format(a=parsed_args))
        sys.exit(0)
| apache-2.0 |
hellfish2/treeio | account/cron.py | 3 | 3417 | # encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
"""
Account cron jobs
"""
import codecs
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.db.models import Q
from django.utils.html import strip_tags
from django.contrib.sites.models import Site
from datetime import datetime, timedelta
from treeio.core.mail import SystemEmail
from treeio.core.models import UpdateRecord
from treeio.account.models import NotificationSetting, Notification
class CronNotifier:
    """Builds and delivers periodic notification digests for account owners."""

    def __init__(self):
        # Earliest time at which the next daily digest pass may run.
        self.next_daily = datetime.now()

    def send_notification(self, note, records):
        """Render *records* into an HTML digest for *note* and deliver it.

        Sends a SystemEmail when the owner has a contact e-mail address and
        always stores an in-app Notification copy.
        """
        message_html = codecs.getwriter("utf8")(StringIO())
        message_html.write(note.title())
        current_url = None
        for record in records:
            if current_url != record.url:
                # New object: emit a linked header once per group of records.
                current_url = record.url
                message_html.write(
                    u'<br /><br />\n\n<a href="%s">' % unicode(record.url))
                if record.sender:
                    message_html.write(u'%s</a> (%s):<br />\n' %
                                       (unicode(record.sender), unicode(record.sender.get_human_type())))
                else:
                    message_html.write(
                        u'%s</a>:<br />\n' % unicode(record.url))
                message_html.write('-' * 30)
                message_html.write('<br /><br />\n\n')
            message_html.write(u'%s:<br />\n%s - %s<br /><br />\n\n' %
                               (unicode(record.author), unicode(record.date_created.isoformat()), record.get_full_message()))
        signature = "This is an automated message from Tree.io service (http://tree.io). Please do not reply to this e-mail."
        subject = "%s summary of [Tree.io] %s" % (
            note.get_ntype_display(), unicode(note.owner),)
        # send email notification to recipient
        try:
            toaddr = note.owner.get_contact().get_email()
        except Exception:
            # Narrowed from a bare `except:` (which also swallowed
            # SystemExit/KeyboardInterrupt).  Owner may have no contact or
            # no e-mail; fall back to the in-app notification only.
            toaddr = None
        # Compute the HTML up front: it is needed for the in-app Notification
        # regardless of whether an e-mail address is available (previously
        # `html` was only bound on the e-mail path).
        html = message_html.getvalue()
        # Make relative links absolute so they work from an e-mail client.
        html = html.replace(
            'href="', 'href="http://' + Site.objects.get_current().domain)
        if toaddr:
            body = strip_tags(html)
            SystemEmail(
                toaddr, subject, body, signature, html + signature).send_email()
        Notification(
            recipient=note.owner, body=html, ntype=note.ntype).save()

    def send_notifications(self):
        "Run sending some notifications"
        now = datetime.now()
        if self.next_daily <= now:
            # Due, enabled notification settings only.
            notes = NotificationSetting.objects.filter(
                next_date__lte=now.date(), enabled=True)
            for note in notes:
                # OR together one filter per subscribed module...
                query = Q()
                for module in note.modules.all():
                    query = query | Q(
                        about__object_type__icontains=module.name)
                # ...restricted to records created since the last run that
                # the owner authored or received.
                query = query & Q(date_created__gte=note.last_datetime) \
                    & (Q(author=note.owner_id) | Q(recipients=note.owner_id))
                self.send_notification(note, UpdateRecord.objects.filter(
                    query).distinct().order_by('url', '-date_created'))
                note.update_date(now)
            # Next daily pass: midnight tomorrow.
            self.next_daily = datetime(
                now.year, now.month, now.day) + timedelta(days=1)
| mit |
sushramesh/lwc | lib/python2.7/site-packages/django/template/utils.py | 308 | 4736 | import os
import warnings
from collections import Counter, OrderedDict
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils import lru_cache
from django.utils._os import upath
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
class InvalidTemplateEngineError(ImproperlyConfigured):
    """Requested template engine alias has no entry in settings.TEMPLATES."""
    pass
class EngineHandler(object):
    """Lazily instantiates and caches the template engine backends declared
    in settings.TEMPLATES (falling back to the deprecated TEMPLATE_* settings)."""

    def __init__(self, templates=None):
        """
        templates is an optional list of template engine definitions
        (structured like settings.TEMPLATES).
        """
        self._templates = templates
        # alias -> instantiated backend; filled on first access in __getitem__.
        self._engines = {}

    @cached_property
    def templates(self):
        # Normalized engine configs as an OrderedDict keyed by unique NAME.
        if self._templates is None:
            self._templates = settings.TEMPLATES
        if not self._templates:
            # Legacy fallback: synthesize a DjangoTemplates config from the
            # deprecated TEMPLATE_* settings (support removed in Django 1.10).
            warnings.warn(
                "You haven't defined a TEMPLATES setting. You must do so "
                "before upgrading to Django 1.10. Otherwise Django will be "
                "unable to load templates.", RemovedInDjango110Warning)
            self._templates = [
                {
                    'BACKEND': 'django.template.backends.django.DjangoTemplates',
                    'DIRS': settings.TEMPLATE_DIRS,
                    'OPTIONS': {
                        'allowed_include_roots': settings.ALLOWED_INCLUDE_ROOTS,
                        'context_processors': settings.TEMPLATE_CONTEXT_PROCESSORS,
                        'debug': settings.TEMPLATE_DEBUG,
                        'loaders': settings.TEMPLATE_LOADERS,
                        'string_if_invalid': settings.TEMPLATE_STRING_IF_INVALID,
                    },
                },
            ]
        templates = OrderedDict()
        backend_names = []
        for tpl in self._templates:
            # Copy so the setdefault() calls below don't mutate settings.
            tpl = tpl.copy()
            try:
                # This will raise an exception if 'BACKEND' doesn't exist or
                # isn't a string containing at least one dot.
                default_name = tpl['BACKEND'].rsplit('.', 2)[-2]
            except Exception:
                invalid_backend = tpl.get('BACKEND', '<not defined>')
                raise ImproperlyConfigured(
                    "Invalid BACKEND for a template engine: {}. Check "
                    "your TEMPLATES setting.".format(invalid_backend))
            tpl.setdefault('NAME', default_name)
            tpl.setdefault('DIRS', [])
            tpl.setdefault('APP_DIRS', False)
            tpl.setdefault('OPTIONS', {})
            templates[tpl['NAME']] = tpl
            backend_names.append(tpl['NAME'])
        # Engine aliases must be unique; report every duplicate at once.
        counts = Counter(backend_names)
        duplicates = [alias for alias, count in counts.most_common() if count > 1]
        if duplicates:
            raise ImproperlyConfigured(
                "Template engine aliases aren't unique, duplicates: {}. "
                "Set a unique NAME for each engine in settings.TEMPLATES."
                .format(", ".join(duplicates)))
        return templates

    def __getitem__(self, alias):
        # Return the cached backend for *alias*, instantiating it on demand.
        try:
            return self._engines[alias]
        except KeyError:
            try:
                params = self.templates[alias]
            except KeyError:
                raise InvalidTemplateEngineError(
                    "Could not find config for '{}' "
                    "in settings.TEMPLATES".format(alias))
            # If importing or initializing the backend raises an exception,
            # self._engines[alias] isn't set and this code may get executed
            # again, so we must preserve the original params. See #24265.
            params = params.copy()
            backend = params.pop('BACKEND')
            engine_cls = import_string(backend)
            engine = engine_cls(params)
            self._engines[alias] = engine
            return engine

    def __iter__(self):
        # Iterating the handler yields the configured engine aliases.
        return iter(self.templates)

    def all(self):
        # Instantiate (if needed) and return every configured engine.
        return [self[alias] for alias in self]
@lru_cache.lru_cache()
def get_app_template_dirs(dirname):
    """
    Return an iterable of paths of directories to load app templates from.

    dirname is the name of the subdirectory containing templates inside
    installed applications.
    """
    candidates = (
        os.path.join(app_config.path, dirname)
        for app_config in apps.get_app_configs()
        if app_config.path
    )
    # Immutable return value because it will be cached and shared by callers.
    return tuple(upath(d) for d in candidates if os.path.isdir(d))
| mit |
FireWRT/OpenWrt-Firefly-Libraries | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python2.7/test/test_fileinput.py | 34 | 9590 | '''
Tests for fileinput module.
Nick Mathewson
'''
import unittest
from test.test_support import verbose, TESTFN, run_unittest
from test.test_support import unlink as safe_unlink
import sys, re
from StringIO import StringIO
from fileinput import FileInput, hook_encoded
# The fileinput module has 2 interfaces: the FileInput class which does
# all the work, and a few functions (input, etc.) that use a global _state
# variable. We only test the FileInput class, since the other functions
# only provide a thin facade over FileInput.
# Write lines (a list of lines) to temp file number i, and return the
# temp file's name.
def writeTmp(i, lines, mode='w'):    # opening in text mode is the default
    name = TESTFN + str(i)
    with open(name, mode) as f:
        f.writelines(lines)
    return name
def remove_tempfiles(*names):
    # Best-effort cleanup: delegate to test_support's unlink (imported as
    # safe_unlink), one call per temp file name.
    for name in names:
        safe_unlink(name)
class BufferSizesTests(unittest.TestCase):
    """Exercise FileInput with the default buffer size and a tiny one (30),
    to flush out buffering-boundary bugs."""

    def test_buffer_sizes(self):
        # First, run the tests with default and teeny buffer size.
        for round, bs in (0, 0), (1, 30):
            try:
                # Four files of decreasing length: 15, 10, 5 and 1 lines.
                t1 = writeTmp(1, ["Line %s of file 1\n" % (i+1) for i in range(15)])
                t2 = writeTmp(2, ["Line %s of file 2\n" % (i+1) for i in range(10)])
                t3 = writeTmp(3, ["Line %s of file 3\n" % (i+1) for i in range(5)])
                t4 = writeTmp(4, ["Line %s of file 4\n" % (i+1) for i in range(1)])
                self.buffer_size_test(t1, t2, t3, t4, bs, round)
            finally:
                remove_tempfiles(t1, t2, t3, t4)

    def buffer_size_test(self, t1, t2, t3, t4, bs=0, round=0):
        # Pattern matching the upper-cased lines produced by the inplace test.
        pat = re.compile(r'LINE (\d+) OF FILE (\d+)')
        # Step numbers printed in verbose mode; 6 steps per round.
        start = 1 + round*6
        if verbose:
            print '%s. Simple iteration (bs=%s)' % (start+0, bs)
        fi = FileInput(files=(t1, t2, t3, t4), bufsize=bs)
        lines = list(fi)
        fi.close()
        # 15 + 10 + 5 + 1 lines in total across the four files.
        self.assertEqual(len(lines), 31)
        self.assertEqual(lines[4], 'Line 5 of file 1\n')
        self.assertEqual(lines[30], 'Line 1 of file 4\n')
        self.assertEqual(fi.lineno(), 31)
        self.assertEqual(fi.filename(), t4)
        if verbose:
            print '%s. Status variables (bs=%s)' % (start+1, bs)
        fi = FileInput(files=(t1, t2, t3, t4), bufsize=bs)
        s = "x"
        # Advance to a known line in the middle of the second file.
        while s and s != 'Line 6 of file 2\n':
            s = fi.readline()
        self.assertEqual(fi.filename(), t2)
        self.assertEqual(fi.lineno(), 21)
        self.assertEqual(fi.filelineno(), 6)
        self.assertFalse(fi.isfirstline())
        self.assertFalse(fi.isstdin())
        if verbose:
            print '%s. Nextfile (bs=%s)' % (start+2, bs)
        # nextfile() must skip the rest of file 2 but keep the global lineno.
        fi.nextfile()
        self.assertEqual(fi.readline(), 'Line 1 of file 3\n')
        self.assertEqual(fi.lineno(), 22)
        fi.close()
        if verbose:
            print '%s. Stdin (bs=%s)' % (start+3, bs)
        # '-' in the file list means read from sys.stdin.
        fi = FileInput(files=(t1, t2, t3, t4, '-'), bufsize=bs)
        savestdin = sys.stdin
        try:
            sys.stdin = StringIO("Line 1 of stdin\nLine 2 of stdin\n")
            lines = list(fi)
            self.assertEqual(len(lines), 33)
            self.assertEqual(lines[32], 'Line 2 of stdin\n')
            self.assertEqual(fi.filename(), '<stdin>')
            fi.nextfile()
        finally:
            sys.stdin = savestdin
        if verbose:
            print '%s. Boundary conditions (bs=%s)' % (start+4, bs)
        # Before any reading, the state accessors have their defaults.
        fi = FileInput(files=(t1, t2, t3, t4), bufsize=bs)
        self.assertEqual(fi.lineno(), 0)
        self.assertEqual(fi.filename(), None)
        fi.nextfile()
        self.assertEqual(fi.lineno(), 0)
        self.assertEqual(fi.filename(), None)
        if verbose:
            print '%s. Inplace (bs=%s)' % (start+5, bs)
        # inplace=1 redirects stdout into the current file; upper-case
        # every line, then verify the rewritten files below.
        savestdout = sys.stdout
        try:
            fi = FileInput(files=(t1, t2, t3, t4), inplace=1, bufsize=bs)
            for line in fi:
                line = line[:-1].upper()
                print line
            fi.close()
        finally:
            sys.stdout = savestdout
        fi = FileInput(files=(t1, t2, t3, t4), bufsize=bs)
        for line in fi:
            # lines have \n at the end, but line numbers don't count it.
            self.assertEqual(line[-1], '\n')
            m = pat.match(line[:-1])
            self.assertNotEqual(m, None)
            self.assertEqual(int(m.group(1)), fi.filelineno())
        fi.close()
class FileInputTests(unittest.TestCase):
    """Behavioural tests for the FileInput class itself."""

    def test_zero_byte_files(self):
        # Empty files must be skipped transparently while iterating.
        try:
            t1 = writeTmp(1, [""])
            t2 = writeTmp(2, [""])
            t3 = writeTmp(3, ["The only line there is.\n"])
            t4 = writeTmp(4, [""])
            fi = FileInput(files=(t1, t2, t3, t4))
            line = fi.readline()
            self.assertEqual(line, 'The only line there is.\n')
            self.assertEqual(fi.lineno(), 1)
            self.assertEqual(fi.filelineno(), 1)
            self.assertEqual(fi.filename(), t3)
            # EOF: empty string, per-file line counter resets.
            line = fi.readline()
            self.assertFalse(line)
            self.assertEqual(fi.lineno(), 1)
            self.assertEqual(fi.filelineno(), 0)
            self.assertEqual(fi.filename(), t4)
            fi.close()
        finally:
            remove_tempfiles(t1, t2, t3, t4)

    def test_files_that_dont_end_with_newline(self):
        # A missing trailing newline must not merge the last line of one
        # file with the first line of the next.
        try:
            t1 = writeTmp(1, ["A\nB\nC"])
            t2 = writeTmp(2, ["D\nE\nF"])
            fi = FileInput(files=(t1, t2))
            lines = list(fi)
            self.assertEqual(lines, ["A\n", "B\n", "C", "D\n", "E\n", "F"])
            self.assertEqual(fi.filelineno(), 3)
            self.assertEqual(fi.lineno(), 6)
        finally:
            remove_tempfiles(t1, t2)

    def test_unicode_filenames(self):
        # Filenames given as unicode objects must be accepted.
        try:
            t1 = writeTmp(1, ["A\nB"])
            encoding = sys.getfilesystemencoding()
            if encoding is None:
                encoding = 'ascii'
            fi = FileInput(files=unicode(t1, encoding))
            lines = list(fi)
            self.assertEqual(lines, ["A\n", "B"])
        finally:
            remove_tempfiles(t1)

    def test_fileno(self):
        # fileno() is -1 whenever no underlying file is open.
        try:
            t1 = writeTmp(1, ["A\nB"])
            t2 = writeTmp(2, ["C\nD"])
            fi = FileInput(files=(t1, t2))
            self.assertEqual(fi.fileno(), -1)
            line = fi.next()
            self.assertNotEqual(fi.fileno(), -1)
            fi.nextfile()
            self.assertEqual(fi.fileno(), -1)
            line = list(fi)
            self.assertEqual(fi.fileno(), -1)
        finally:
            remove_tempfiles(t1, t2)

    def test_opening_mode(self):
        try:
            # invalid mode, should raise ValueError
            fi = FileInput(mode="w")
            self.fail("FileInput should reject invalid mode argument")
        except ValueError:
            pass
        try:
            # try opening in universal newline mode
            t1 = writeTmp(1, ["A\nB\r\nC\rD"], mode="wb")
            fi = FileInput(files=t1, mode="U")
            lines = list(fi)
            self.assertEqual(lines, ["A\n", "B\n", "C\n", "D"])
        finally:
            remove_tempfiles(t1)

    def test_file_opening_hook(self):
        try:
            # cannot use openhook and inplace mode
            fi = FileInput(inplace=1, openhook=lambda f,m: None)
            self.fail("FileInput should raise if both inplace "
                      "and openhook arguments are given")
        except ValueError:
            pass
        try:
            # openhook must be callable
            fi = FileInput(openhook=1)
            self.fail("FileInput should check openhook for being callable")
        except ValueError:
            pass
        try:
            # rot13 openhook: file contents are decoded while reading.
            t1 = writeTmp(1, ["A\nB"], mode="wb")
            fi = FileInput(files=t1, openhook=hook_encoded("rot13"))
            lines = list(fi)
            self.assertEqual(lines, ["N\n", "O"])
        finally:
            remove_tempfiles(t1)

    def test_readline(self):
        with open(TESTFN, 'wb') as f:
            f.write('A\nB\r\nC\r')
            # Fill TextIOWrapper buffer.
            f.write('123456789\n' * 1000)
            # Issue #20501: readline() shouldn't read whole file.
            f.write('\x80')
        self.addCleanup(safe_unlink, TESTFN)
        fi = FileInput(files=TESTFN, openhook=hook_encoded('ascii'), bufsize=8)
        # The most likely failure is a UnicodeDecodeError due to the entire
        # file being read when it shouldn't have been.
        self.assertEqual(fi.readline(), u'A\n')
        self.assertEqual(fi.readline(), u'B\r\n')
        self.assertEqual(fi.readline(), u'C\r')
        with self.assertRaises(UnicodeDecodeError):
            # Read to the end of file.
            list(fi)
        fi.close()
class Test_hook_encoded(unittest.TestCase):
    """Unit tests for fileinput.hook_encoded()"""

    def test_modes(self):
        with open(TESTFN, 'wb') as f:
            # UTF-7 is a convenient, seldom used encoding
            # ('+IKw-' encodes the euro sign U+20AC).
            f.write('A\nB\r\nC\rD+IKw-')
        self.addCleanup(safe_unlink, TESTFN)

        def check(mode, expected_lines):
            # Open through the encoding hook and compare the decoded lines.
            fi = FileInput(files=TESTFN, mode=mode,
                           openhook=hook_encoded('utf-7'))
            lines = list(fi)
            fi.close()
            self.assertEqual(lines, expected_lines)

        check('r', [u'A\n', u'B\r\n', u'C\r', u'D\u20ac'])
        check('rU', [u'A\n', u'B\r\n', u'C\r', u'D\u20ac'])
        check('U', [u'A\n', u'B\r\n', u'C\r', u'D\u20ac'])
        check('rb', [u'A\n', u'B\r\n', u'C\r', u'D\u20ac'])
def test_main():
    """Run the full test suite for the fileinput module."""
    run_unittest(BufferSizesTests, FileInputTests, Test_hook_encoded)

if __name__ == "__main__":
    test_main()
| gpl-2.0 |
ranjinidas/Axelrod | axelrod/tests/unit/test_calculator.py | 2 | 1308 | """Tests for calculator strategies."""
import random
import axelrod
from .test_player import TestPlayer
C, D = axelrod.Actions.C, axelrod.Actions.D
class TestCalculator(TestPlayer):
    """Tests for the Calculator strategy: it inspects the opponent's
    history and defects when it detects a cycle."""

    name = "Calculator"
    player = axelrod.Calculator
    expected_classifier = {
        'memory_depth': float('inf'),
        'stochastic': True,
        'makes_use_of': set(),
        'inspects_source': False,
        'manipulates_source': False,
        'manipulates_state': False
    }

    def test_strategy(self):
        # Cooperates on the very first move.
        self.first_play_test(C)
        P1 = axelrod.Calculator()
        P1.history = [C] * 20
        P2 = axelrod.Player()
        # [C, D] repeated is a cycle.
        P2.history = [C, D] * 10
        # Defects on cycle detection
        self.assertEqual(D, P1.strategy(P2))
        # Test non-cycle response
        history = [C, C, D, C, C, D, C, C, C, D, C, C, C, C, D, C, C, C, C, C]
        P2.history = history
        self.assertEqual(C, P1.strategy(P2))
        # Test post 20 rounds responses
        self.responses_test([C] * 21, [C] * 21, [D])
        history = [C, C, D, C, C, D, C, C, C, D, C, C, C, C, D, C, C, C, C, C, D]
        self.responses_test([C] * 21, history, [D])
        history = [C, C, D, C, C, D, C, C, C, D, C, C, C, C, D, C, C, C, C, C, D, C]
        self.responses_test([C] * 22, history, [C])
| mit |
kzlin129/practice-typing | lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/filepost.py | 551 | 2512 | # urllib3/filepost.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import codecs
import mimetypes
from uuid import uuid4
from io import BytesIO
from .packages import six
from .packages.six import b
from .fields import RequestField
# StreamWriter class for UTF-8 (element 3 of the CodecInfo tuple returned
# by codecs.lookup()); used to wrap a binary buffer for writing text.
writer = codecs.lookup('utf-8')[3]
def choose_boundary():
    """
    Our embarassingly-simple replacement for mimetools.choose_boundary.
    """
    # A random UUID rendered as 32 hex digits is unique enough for a
    # multipart boundary.
    boundary = uuid4()
    return boundary.hex
def iter_field_objects(fields):
    """
    Iterate over fields.

    Supports list of (k, v) tuples and dicts, and lists of
    :class:`~urllib3.fields.RequestField`.
    """
    source = six.iteritems(fields) if isinstance(fields, dict) else iter(fields)
    for field in source:
        # Pass RequestField instances through; promote (k, v) tuples.
        if isinstance(field, RequestField):
            yield field
        else:
            yield RequestField.from_tuples(*field)
def iter_fields(fields):
    """
    .. deprecated:: 1.6

    Iterate over fields.

    The addition of :class:`~urllib3.fields.RequestField` makes this function
    obsolete. Instead, use :func:`iter_field_objects`, which returns
    :class:`~urllib3.fields.RequestField` objects.

    Supports list of (k, v) tuples and dicts.
    """
    source = six.iteritems(fields) if isinstance(fields, dict) else fields
    return ((k, v) for k, v in source)
def encode_multipart_formdata(fields, boundary=None):
    """
    Encode a dictionary of ``fields`` using the multipart/form-data MIME format.

    :param fields:
        Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).

    :param boundary:
        If not specified, then a random boundary will be generated using
        :func:`mimetools.choose_boundary`.

    :returns:
        Tuple of ``(body_bytes, content_type_str)``.
    """
    body = BytesIO()
    if boundary is None:
        boundary = choose_boundary()
    for field in iter_field_objects(fields):
        # Each part starts with the boundary marker followed by its headers.
        body.write(b('--%s\r\n' % (boundary)))
        writer(body).write(field.render_headers())
        data = field.data
        if isinstance(data, int):
            data = str(data)  # Backwards compatibility
        # Text is encoded through the UTF-8 stream writer; bytes go raw.
        if isinstance(data, six.text_type):
            writer(body).write(data)
        else:
            body.write(data)
        body.write(b'\r\n')
    # Closing boundary has a trailing '--'.
    body.write(b('--%s--\r\n' % (boundary)))
    content_type = str('multipart/form-data; boundary=%s' % boundary)
    return body.getvalue(), content_type
| apache-2.0 |
TopOPPS/salesforce_user_group_April_3_2014 | demo/views.py | 1 | 2691 | from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from django.conf import settings
from simple_salesforce import Salesforce
import base64
import hashlib
import hmac
import json
@csrf_exempt
def index(request):
    """
    When this view is loaded in a canvas app, Salesforce sends a POST request to it containing the
    currently logged in user's data.

    This post data contains a base64 string of the user's data, along with a signature. We can validate
    the signature by comparing it to our own generated expected signature from our Salesforce
    application's secret key.

    The result of parsing the signed request is that we obtain an instance_url and oauth_token we
    can use to query the Salesforce API. Feel free to dive into the parse_signed_request() function
    to see the nitty-gritty details.
    """
    data = parse_signed_request(request.POST['signed_request'], settings.SALESFORCE_CONSUMER_SECRET)
    # Default to an empty result set: previously `results` was only bound in
    # the authenticated branch, so an invalid signed request crashed with a
    # NameError at render time instead of showing an empty page.
    results = []
    if data:
        # User authenticated, let's do queries!
        # Create a simple salesforce instance, using our newly obtained
        # instance_url and oauth_token for authentication.
        sf = Salesforce(instance_url=data['client']['instanceUrl'], session_id=data['client']['oauthToken'])
        # Count won opportunities per lead source.
        stats = {}
        for opp in sf.query_all("SELECT LeadSource FROM Opportunity WHERE IsWon = True")['records']:
            if opp['LeadSource'] not in stats:
                stats[opp['LeadSource']] = 0
            stats[opp['LeadSource']] += 1
        for lead_source, total in stats.items():
            results.append({"lead_source": lead_source, "total": total})
        results = sorted(results, key=lambda k: k['total']*-1)  # sort results by total
    return render(request, 'index.html', {
        "results": results
    })
def parse_signed_request(signed_request, secret):
    """ Used for signed requests, Canvas App Authentication.

    ``signed_request`` has the form ``"<b64 signature>.<b64 payload>"``.
    Returns the decoded payload dict when the HMAC-SHA256 signature matches
    ``secret``, otherwise None.
    """
    l = signed_request.split('.', 2)
    encoded_sig = l[0]
    payload = l[1]
    sig = base64_url_decode(encoded_sig)
    data = json.loads(base64_url_decode(payload))
    expected_sig = hmac.new(secret, msg=payload, digestmod=hashlib.sha256).digest()
    # Use a constant-time comparison so the check does not leak timing
    # information about the expected signature (requires Python >= 2.7.7).
    if not hmac.compare_digest(sig, expected_sig):
        # Bad Signature
        return None
    else:
        # Valid signed request
        return data
def base64_url_decode(inp):
    """ Used for signed requests, Canvas App Authentication """
    # Restore the '=' padding that URL-safe base64 strips (to a multiple of 4).
    padding_factor = (4 - len(inp) % 4) % 4
    inp += "="*padding_factor
    # Map the URL-safe alphabet ('-', '_') back to standard base64 ('+', '/')
    # before decoding; unicode.translate() requires an {ord: unicode} mapping.
    return base64.b64decode(unicode(inp).translate(dict(zip(map(ord, u'-_'), u'+/'))))
haoyuchen1992/osf.io | website/addons/badges/views/crud.py | 59 | 2323 | # -*- coding: utf-8 -*-
import httplib as http
from flask import request
from framework.exceptions import HTTPError
from website.util.sanitize import escape_html
from website.project.decorators import ( # noqa
must_be_contributor_or_public,
must_have_addon, must_not_be_registration,
must_be_valid_project,
must_have_permission
)
from ..model import Badge, BadgeAssertion
@must_be_contributor_or_public
@must_have_addon('badges', 'node')
@must_have_addon('badges', 'user')
def award_badge(*args, **kwargs):
    """Award a badge to the current node/project.

    Expects JSON ``{"badgeid": ..., "evidence": ...}``.  403 when the user
    add-on may not award badges; 400 when the badge id does not resolve.
    Returns the new BadgeAssertion's id.
    """
    badgeid = request.json.get('badgeid')
    evidence = request.json.get('evidence')
    node = kwargs['node'] or kwargs['project']
    awarder = kwargs['user_addon']
    if not awarder or not awarder.can_award:
        raise HTTPError(http.FORBIDDEN)
    badge = Badge.load(badgeid)
    if not badge:
        raise HTTPError(http.BAD_REQUEST)
    # System badges record which add-on awarded them; regular badges don't.
    if badge.is_system_badge:
        return BadgeAssertion.create(badge, node, evidence, awarder=awarder)._id
    return BadgeAssertion.create(badge, node, evidence)._id
@must_have_addon('badges', 'user')
def create_badge(*args, **kwargs):
    """Create a new badge owned by the current user's badge add-on.

    Expects JSON with non-empty ``badgeName``, ``description``, ``imageurl``
    and ``criteria``; raises 400 when any is missing.  Returns
    ``{'badgeid': ...}`` with status 201 on success.
    """
    badge_data = request.json
    awarder = kwargs['user_addon']
    # All four fields are mandatory and must be non-empty.
    required = ('badgeName', 'description', 'imageurl', 'criteria')
    if not badge_data or not all(badge_data.get(field) for field in required):
        raise HTTPError(http.BAD_REQUEST)
    try:
        # Renamed from `id`, which shadowed the builtin.
        badge_id = Badge.create(awarder, escape_html(badge_data))._id
        return {'badgeid': badge_id}, http.CREATED
    except IOError:
        raise HTTPError(http.BAD_REQUEST)
@must_be_valid_project
@must_have_addon('badges', 'user')
@must_have_addon('badges', 'node')
def revoke_badge(*args, **kwargs):
    """Revoke a previously awarded badge assertion.

    Expects JSON ``{"id": <assertion id>, "reason": <optional text>}``.
    Only the add-on user who originally awarded the badge may revoke it.
    Returns 200 on success; raises 400 otherwise.
    """
    _id = request.json.get('id')
    reason = request.json.get('reason', '')
    if _id and kwargs['user_addon'].can_award:
        assertion = BadgeAssertion.load(_id)
        if assertion:
            # Only the original awarder may revoke their own assertion.
            if assertion.badge and assertion.awarder.owner._id == kwargs['user_addon'].owner._id:
                assertion.revoked = True
                assertion.reason = reason
                # Also record the revocation (with reason) on the user add-on.
                kwargs['user_addon'].revocation_list[_id] = reason
                assertion.save()
                kwargs['user_addon'].save()
                return http.OK
    raise HTTPError(http.BAD_REQUEST)
| apache-2.0 |
shujaatak/UAV_MissionPlanner | Lib/posixpath.py | 145 | 13182 | """Common operations on Posix pathnames.
Instead of importing this module directly, import os and refer to
this module as os.path. The "os.path" name is an alias for this
module on Posix systems; on other systems (e.g. Mac, Windows),
os.path provides the same operations in a manner specific to that
platform, and is an alias to another module (e.g. macpath, ntpath).
Some of this can actually be useful on non-Posix systems too, e.g.
for manipulation of the pathname component of URLs.
"""
import os
import sys
import stat
import genericpath
import warnings
from genericpath import *
# Public API of this module (mirrors os.path on POSIX platforms).
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
           "basename","dirname","commonprefix","getsize","getmtime",
           "getatime","getctime","islink","exists","lexists","isdir","isfile",
           "ismount","walk","expanduser","expandvars","normpath","abspath",
           "samefile","sameopenfile","samestat",
           "curdir","pardir","sep","pathsep","defpath","altsep","extsep",
           "devnull","realpath","supports_unicode_filenames","relpath"]

# strings representing various path-related bits and pieces
curdir = '.'                 # current directory token
pardir = '..'                # parent directory token
extsep = '.'                 # extension separator
sep = '/'                    # path component separator
pathsep = ':'                # separator between entries in $PATH-style lists
defpath = ':/bin:/usr/bin'   # default executable search path
altsep = None                # POSIX has no alternative separator
devnull = '/dev/null'        # the null device
# Normalize the case of a pathname.  Trivial in Posix, string.lower on Mac.
# On MS-DOS this may also turn slashes into backslashes; however, other
# normalizations (such as optimizing '../' away) are not allowed
# (another function should be defined to do that).

def normcase(s):
    """Normalize case of pathname.  Has no effect under Posix"""
    # POSIX filesystems are case-sensitive, so the path is returned unchanged.
    return s
# Return whether a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.

def isabs(s):
    """Test whether a path is absolute"""
    # On POSIX a path is absolute iff its first character is '/'.
    return s[:1] == '/'
# Join pathnames.
# Ignore the previous parts if a part is absolute.
# Insert a '/' unless the first part is empty or already ends in '/'.

def join(a, *p):
    """Join two or more pathname components, inserting '/' as needed.
    If any component is an absolute path, all previous path components
    will be discarded."""
    path = a
    for component in p:
        if component.startswith('/'):
            # Absolute component: restart from it, dropping what came before.
            path = component
        elif not path or path.endswith('/'):
            path += component
        else:
            path = path + '/' + component
    return path
# Split a path in head (everything up to the last '/') and tail (the
# rest).  If the path ends in '/', tail will be empty.  If there is no
# '/' in the path, head will be empty.
# Trailing '/'es are stripped from head unless it is the root.

def split(p):
    """Split a pathname.  Returns tuple "(head, tail)" where "tail" is
    everything after the final slash.  Either part may be empty."""
    cut = p.rfind('/') + 1
    head, tail = p[:cut], p[cut:]
    # Keep head intact when it is nothing but slashes ('/', '//', ...),
    # since those denote the root; otherwise drop trailing slashes.
    if head and head != '/' * len(head):
        head = head.rstrip('/')
    return head, tail
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.

def splitext(p):
    # Delegate to the shared generic implementation, parameterized with the
    # POSIX separators ('/' sep, no altsep, '.' extsep).
    return genericpath._splitext(p, sep, altsep, extsep)
# Reuse the generic implementation's docstring so help() stays accurate.
splitext.__doc__ = genericpath._splitext.__doc__
# Split a pathname into a drive specification and the rest of the
# path.  Useful on DOS/Windows/NT; on Unix, the drive is always empty.

def splitdrive(p):
    """Split a pathname into drive and path. On Posix, drive is always
    empty."""
    # POSIX paths have no drive letters, so the drive part is always ''.
    return '', p
# Return the tail (basename) part of a path, same as split(path)[1].

def basename(p):
    """Returns the final component of a pathname"""
    # Everything after the last '/' (the whole string when there is none).
    return p[p.rfind('/') + 1:]
# Return the head (dirname) part of a path, same as split(path)[0].

def dirname(p):
    """Returns the directory component of a pathname"""
    cut = p.rfind('/') + 1
    head = p[:cut]
    # Trim trailing slashes unless head is all slashes (i.e. the root).
    if head and head != '/' * len(head):
        head = head.rstrip('/')
    return head
# Is a path a symbolic link?
# This will always return false on systems where os.lstat doesn't exist.

def islink(path):
    """Test whether a path is a symbolic link"""
    try:
        mode = os.lstat(path).st_mode
    except (os.error, AttributeError):
        # Path missing/inaccessible, or platform without lstat: not a link.
        return False
    return stat.S_ISLNK(mode)
# Being true for dangling symbolic links is also useful.
def lexists(path):
    """Test whether a path exists.  Returns True for broken symbolic links"""
    # lstat (unlike stat) does not follow symlinks, so a dangling link
    # still counts as existing.
    try:
        os.lstat(path)
        return True
    except os.error:
        return False
# Are two filenames really pointing to the same file?
def samefile(f1, f2):
    """Test whether two pathnames reference the same actual file"""
    # Compare the stat buffers of both paths (follows symlinks).
    return samestat(os.stat(f1), os.stat(f2))
# Are two open files really referencing the same file?
# (Not necessarily the same file descriptor!)
def sameopenfile(fp1, fp2):
    """Test whether two open file objects reference the same file"""
    # fp1/fp2 are file *descriptors* here, as fstat requires.
    return samestat(os.fstat(fp1), os.fstat(fp2))
# Are two stat buffers (obtained from stat, fstat or lstat)
# describing the same file?
def samestat(s1, s2):
    """Test whether two stat buffers reference the same file"""
    # A file is uniquely identified by its (inode, device) pair.
    return (s1.st_ino, s1.st_dev) == (s2.st_ino, s2.st_dev)
# Is a path a mount point?
# (Does this work for all UNIXes? Is it even guaranteed to work by Posix?)
def ismount(path):
    """Test whether a path is a mount point"""
    if islink(path):
        # A symlink can never be a mount point
        return False
    try:
        stat_here = os.lstat(path)
        stat_parent = os.lstat(join(path, '..'))
    except os.error:
        return False  # It doesn't exist -- so not a mount point :-)
    if stat_here.st_dev != stat_parent.st_dev:
        return True  # path/.. on a different device as path
    if stat_here.st_ino == stat_parent.st_ino:
        return True  # path/.. is the same i-node as path
    return False
# Directory tree walk.
# For each directory under top (including top itself, but excluding
# '.' and '..'), func(arg, dirname, filenames) is called, where
# dirname is the name of the directory and filenames is the list
# of files (and subdirectories etc.) in the directory.
# The func may modify the filenames list, to implement a filter,
# or to impose a different order of visiting.
def walk(top, func, arg):
    """Directory tree walk with callback function.

    For each directory in the directory tree rooted at top (including top
    itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
    dirname is the name of the directory, and fnames a list of the names of
    the files and subdirectories in dirname (excluding '.' and '..').  func
    may modify the fnames list in-place (e.g. via del or slice assignment),
    and walk will only recurse into the subdirectories whose names remain in
    fnames; this can be used to implement a filter, or to impose a specific
    order of visiting.  No semantics are defined for, or required of, arg,
    beyond that arg is always passed to func.  It can be used, e.g., to pass
    a filename pattern, or a mutable object designed to accumulate
    statistics.  Passing None for arg is common."""
    warnings.warnpy3k("In 3.x, os.path.walk is removed in favor of os.walk.",
                      stacklevel=2)
    try:
        entries = os.listdir(top)
    except os.error:
        # Unreadable/vanished directory: silently skip, like os.walk's default.
        return
    func(arg, top, entries)
    for entry in entries:
        full = join(top, entry)
        try:
            mode = os.lstat(full).st_mode
        except os.error:
            continue
        if stat.S_ISDIR(mode):
            walk(full, func, arg)
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
def expanduser(path):
    """Expand ~ and ~user constructions.  If user or $HOME is unknown,
    do nothing."""
    if not path.startswith('~'):
        return path
    # Find the end of the user name (first '/' after the tilde).
    cut = path.find('/', 1)
    if cut < 0:
        cut = len(path)
    if cut == 1:
        # Bare '~': use $HOME, falling back to the password database.
        try:
            userhome = os.environ['HOME']
        except KeyError:
            import pwd
            userhome = pwd.getpwuid(os.getuid()).pw_dir
    else:
        # '~user': look the user up; leave the path alone if unknown.
        import pwd
        try:
            userhome = pwd.getpwnam(path[1:cut]).pw_dir
        except KeyError:
            return path
    # Drop trailing slashes, but keep '/' itself intact.
    userhome = userhome.rstrip('/') or userhome
    return userhome + path[cut:]
# Expand paths containing shell variable substitutions.
# This expands the forms $variable and ${variable} only.
# Non-existent variables are left unchanged.
# Lazily-compiled pattern matching $name or ${name}.
_varprog = None

def expandvars(path):
    """Expand shell variables of form $var and ${var}.  Unknown variables
    are left unchanged."""
    global _varprog
    if '$' not in path:
        return path
    if _varprog is None:
        import re
        _varprog = re.compile(r'\$(\w+|\{[^}]*\})')
    pos = 0
    while True:
        match = _varprog.search(path, pos)
        if match is None:
            return path
        start, end = match.span(0)
        name = match.group(1)
        if name.startswith('{') and name.endswith('}'):
            name = name[1:-1]
        try:
            value = os.environ[name]
        except KeyError:
            # Unknown variable: leave it in place, continue after it.
            pos = end
        else:
            rest = path[end:]
            path = path[:start] + value
            # Resume scanning after the substituted value, not inside it.
            pos = len(path)
            path += rest
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B.
# It should be understood that this may change the meaning of the path
# if it contains symbolic links!
def normpath(path):
    """Normalize path, eliminating double slashes, etc."""
    # Preserve unicode (if path is unicode)
    if isinstance(path, unicode):
        slash, dot = u'/', u'.'
    else:
        slash, dot = '/', '.'
    if path == '':
        return dot
    initial_slashes = path.startswith('/')
    # POSIX allows one or two initial slashes, but treats three or more
    # as single slash.
    if (initial_slashes and
            path.startswith('//') and not path.startswith('///')):
        initial_slashes = 2
    # Empty components and '.' never affect the result: drop them up front.
    parts = [comp for comp in path.split('/') if comp not in ('', '.')]
    resolved = []
    for part in parts:
        if part != '..':
            resolved.append(part)
        elif ((not initial_slashes and not resolved) or
              (resolved and resolved[-1] == '..')):
            # Leading '..' on a relative path (or after another kept '..')
            # cannot be collapsed -- keep it.
            resolved.append(part)
        elif resolved:
            resolved.pop()
    path = slash.join(resolved)
    if initial_slashes:
        path = slash * initial_slashes + path
    return path or dot
def abspath(path):
    """Return an absolute path."""
    if not isabs(path):
        # Pick the unicode-aware getcwd when the input is unicode.
        cwd = os.getcwdu() if isinstance(path, unicode) else os.getcwd()
        path = join(cwd, path)
    return normpath(path)
# Return a canonical path (i.e. the absolute location of a file on the
# filesystem).
def realpath(filename):
    """Return the canonical path of the specified filename, eliminating any
    symbolic links encountered in the path."""
    if isabs(filename):
        parts = ['/'] + filename.split('/')[1:]
    else:
        parts = [''] + filename.split('/')
    # Walk the path one component at a time, resolving each symlink.
    for idx in range(2, len(parts) + 1):
        prefix = join(*parts[0:idx])
        if not islink(prefix):
            continue
        target = _resolve_link(prefix)
        if target is None:
            # Infinite loop -- return original component + rest of the path
            return abspath(join(*([prefix] + parts[idx:])))
        # Restart canonicalisation from the resolved prefix.
        return realpath(join(*([target] + parts[idx:])))
    return abspath(filename)
def _resolve_link(path):
    """Internal helper function.  Takes a path and follows symlinks
    until we either arrive at something that isn't a symlink, or
    encounter a path we've seen before (meaning that there's a loop).
    Returns None when a loop is detected.
    """
    seen = set()
    while islink(path):
        if path in seen:
            # Already seen this path, so we must have a symlink loop
            return None
        seen.add(path)
        # Resolve where the link points to
        target = os.readlink(path)
        if isabs(target):
            path = normpath(target)
        else:
            # Relative targets are interpreted against the link's directory.
            path = normpath(join(dirname(path), target))
    return path
supports_unicode_filenames = (sys.platform == 'darwin')
def relpath(path, start=curdir):
    """Return a relative version of a path"""
    if not path:
        raise ValueError("no path specified")
    start_parts = [part for part in abspath(start).split(sep) if part]
    path_parts = [part for part in abspath(path).split(sep) if part]
    # Work out how much of the filepath is shared by start and path.
    shared = len(commonprefix([start_parts, path_parts]))
    # Climb out of the unshared part of start, then descend into path.
    relative = [pardir] * (len(start_parts) - shared) + path_parts[shared:]
    if not relative:
        return curdir
    return join(*relative)
| gpl-2.0 |
justajeffy/arsenalsuite | python/apps/farm_stats/reportselectordialog.py | 11 | 1246 |
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.uic import *
from blur.Stonegui import *
from farmreport import Types
from reportwindow import *
# All report windows ever opened; module-level so the windows are not
# garbage-collected (and closed) as soon as generateReport() returns.
ReportWindows = []

class ReportSelectorDialog(QDialog):
    """Dialog that lets the user pick a report type and a date range,
    then opens the generated report in a new ReportWindow."""
    def __init__(self,parent=None):
        QDialog.__init__(self,parent)
        loadUi("reportselectordialogui.ui",self)
        dt = QDateTime.currentDateTime()
        # Default range: the last seven days, ending now.
        self.mStart.setDateTime( dt.addDays(-7) )
        self.mEnd.setDateTime( dt )
        # Populate the list with every registered report type.
        for reportType in Types:
            self.mReportList.addItem( reportType.Name )
        self.connect( self.mExitButton, SIGNAL( 'clicked()' ), self.close )
        self.connect( self.mGenerateButton, SIGNAL( 'clicked()' ), self.generateReport )
        # Disabled until a report type is selected; see currentChanged().
        self.mGenerateButton.setEnabled( False )
        self.connect( self.mReportList, SIGNAL( 'currentRowChanged(int)' ), self.currentChanged )
    def currentChanged( self, row ):
        # row is -1 when nothing is selected.
        self.mGenerateButton.setEnabled( row >= 0 )
    def generateReport( self ):
        """Generate the selected report over the chosen date range and
        show it in a new ReportWindow kept alive in ReportWindows."""
        reportName = self.mReportList.currentItem().text()
        for reportType in Types:
            if reportType.Name == reportName:
                report = reportType.Class()
                report.generate( self.mStart.dateTime(), self.mEnd.dateTime() )
                rw = ReportWindow()
                rw.loadReport( report )
                rw.show()
                ReportWindows.append(rw)
                return
| gpl-2.0 |
tcatut/plugin.video.youtube | resources/lib/youtube/helper/yt_login.py | 1 | 4617 | __author__ = 'bromix'
import time
def process(mode, provider, context, re_match, needs_tv_login=True):
    """Sign the user in to or out of YouTube via the OAuth device flow.

    mode -- 'in' to perform the interactive sign-in, 'out' to revoke the
            stored refresh token(s) and clear the access token.
    provider/context -- plugin provider and Kodi context objects supplying
            the API client, access manager, localisation and UI helpers.
    re_match -- not used here; kept so this handler matches the provider's
            uniform route-handler signature.
    needs_tv_login -- when True, the user must complete the device flow
            twice (once with the TV client, once with the Kodi client) and
            the two token pairs are stored '|'-joined.
    """
    def _do_login(_for_tv=False):
        """Run one OAuth device flow; returns (access_token, expires_in,
        refresh_token), or ('', 0, '') if the user aborted the dialog."""
        _client = provider.get_client(context)
        json_data = {}
        if _for_tv:
            json_data = _client.generate_user_code_tv()
            pass
        else:
            json_data = _client.generate_user_code()
            pass
        # Server-suggested poll interval comes back in seconds; work in ms.
        interval = int(json_data.get('interval', 5)) * 1000
        if interval > 60000:
            # Cap absurdly long intervals back to the 5 second default.
            interval = 5000
            pass
        device_code = json_data['device_code']
        user_code = json_data['user_code']
        text = context.localize(provider.LOCAL_MAP['youtube.sign.go_to']) % '[B]youtube.com/activate[/B]'
        text += '[CR]%s [B]%s[/B]' % (context.localize(provider.LOCAL_MAP['youtube.sign.enter_code']), user_code)
        dialog = context.get_ui().create_progress_dialog(
            heading=context.localize(provider.LOCAL_MAP['youtube.sign.in']), text=text, background=False)
        steps = (10 * 60 * 1000) / interval  # 10 Minutes
        dialog.set_total(steps)
        # Poll the token endpoint until the user approves, aborts, or the
        # ten-minute budget is exhausted.
        for i in range(steps):
            dialog.update()
            json_data = {}
            if _for_tv:
                json_data = _client.get_device_token_tv(device_code)
                pass
            else:
                json_data = _client.get_device_token(device_code)
                pass
            if not 'error' in json_data:
                access_token = json_data.get('access_token', '')
                expires_in = time.time() + int(json_data.get('expires_in', 3600))
                refresh_token = json_data.get('refresh_token', '')
                if access_token and refresh_token:
                    dialog.close()
                    return access_token, expires_in, refresh_token
                # provider.reset_client()
                # context.get_access_manager().update_access_token(access_token, expires_in, refresh_token)
                #context.get_ui().refresh_container()
                # No error but also no usable tokens: give up polling.
                break
                pass
            if dialog.is_aborted():
                dialog.close()
                return '', 0, ''
            context.sleep(interval)
            pass
        dialog.close()
        pass
    if mode == 'out':
        # we clear the cache, so no cached data of an old account will be displayed.
        context.get_function_cache().clear()
        access_manager = context.get_access_manager()
        client = provider.get_client(context)
        # Revoke every stored refresh token ('|'-separated for tv+kodi pairs).
        if access_manager.has_refresh_token():
            refresh_tokens = access_manager.get_refresh_token().split('|')
            for refresh_token in refresh_tokens:
                client.revoke(refresh_token)
                pass
            pass
        provider.reset_client()
        access_manager.update_access_token(access_token='', refresh_token='')
        context.get_ui().refresh_container()
        pass
    elif mode == 'in':
        access_token_tv = ''
        expires_in_tv = 0
        refresh_token_tv = ''
        if needs_tv_login:
            # Warn the user that they will have to sign in twice.
            context.get_ui().on_ok(context.localize(provider.LOCAL_MAP['youtube.sign.twice.title']),
                                   context.localize(provider.LOCAL_MAP['youtube.sign.twice.text']))
            access_token_tv, expires_in_tv, refresh_token_tv = _do_login(_for_tv=True)
            # abort tv login
            if not access_token_tv and not refresh_token_tv:
                provider.reset_client()
                context.get_access_manager().update_access_token('')
                context.get_ui().refresh_container()
                return
            pass
        access_token_kodi, expires_in_kodi, refresh_token_kodi = _do_login(_for_tv=False)
        # abort kodi login
        if not access_token_kodi and not refresh_token_kodi:
            provider.reset_client()
            context.get_access_manager().update_access_token('')
            context.get_ui().refresh_container()
            return
        if needs_tv_login:
            # Store both token pairs '|'-joined; expire when the first does.
            access_token = '%s|%s' % (access_token_tv, access_token_kodi)
            refresh_token = '%s|%s' % (refresh_token_tv, refresh_token_kodi)
            expires_in = min(expires_in_tv, expires_in_kodi)
            pass
        else:
            access_token = access_token_kodi
            refresh_token = refresh_token_kodi
            expires_in = expires_in_kodi
            pass
        provider.reset_client()
        context.get_access_manager().update_access_token(access_token, expires_in, refresh_token)
        context.get_ui().refresh_container()
        pass
    pass
| gpl-2.0 |
Kmaschta/django-algolia | algolia/tests/test_backends.py | 1 | 2099 | # -*- coding: utf-8 -*-
import pytest
from django.core.exceptions import ImproperlyConfigured
from algolia import AlgoliaIndexer
@pytest.fixture()
def configs_success():
    """A minimal valid settings dict: both Algolia credentials present."""
    return dict(API_KEY='some-api-key', API_SECRET='some-api-secret')
@pytest.fixture()
def configs_wrong():
    """A settings dict missing the mandatory API credentials."""
    return dict(QUIET=False)
@pytest.fixture()
def indexer(configs_success):
    """An AlgoliaIndexer built from the valid settings."""
    return AlgoliaIndexer(configs_success)
def test_init_success(indexer, configs_success):
    """The indexer keeps the settings dict it was constructed with."""
    assert configs_success == indexer.configs
def test_init_wrong(configs_wrong):
    """The indexer must refuse settings without API credentials.

    Uses pytest.raises instead of the try/``assert False``/except
    anti-pattern: it fails with a clear message when no exception is
    raised and never swallows unrelated errors.
    """
    with pytest.raises(ImproperlyConfigured):
        AlgoliaIndexer(configs_wrong)
def test_get_client(indexer):
    """get_client() must return the same client the indexer caches."""
    assert indexer.get_client() == indexer.client
def test_get_index_name(indexer):
    """_get_index_name builds '<ModelName><Suffix>' from an instance or a
    model class, honours with_suffix and the INDEX_SUFFIX setting, and
    rejects ambiguous argument combinations.

    The try/``assert False``/except blocks are replaced with
    pytest.raises, which reports a missing exception explicitly.
    """
    class MyModel():
        pass

    instance = MyModel()

    # Return the correct name
    assert indexer._get_index_name(instance) == 'MyModelDjangoAlgolia'
    assert indexer._get_index_name(model=MyModel) == 'MyModelDjangoAlgolia'
    assert indexer._get_index_name(instance, with_suffix=False) == 'MyModel'
    assert indexer._get_index_name(model=MyModel, with_suffix=False) == 'MyModel'

    # Raise if both an instance and a model are given, or neither.
    with pytest.raises(ValueError):
        indexer._get_index_name(instance, MyModel)
    with pytest.raises(ValueError):
        indexer._get_index_name()

    # React well depending on the configurations
    indexer.configs['INDEX_SUFFIX'] = 'OtherSuffix'
    assert indexer._get_index_name(instance) == 'MyModelOtherSuffix'
    assert indexer._get_index_name(model=MyModel) == 'MyModelOtherSuffix'

    indexer.configs['SUFFIX_MY_INDEX'] = False
    assert indexer._get_index_name(instance, with_suffix=False) == 'MyModel'
    assert indexer._get_index_name(model=MyModel, with_suffix=False) == 'MyModel'
def test_search(indexer):
    """In TEST_MODE, search() short-circuits to the canned test response."""
    class MyModel():
        pass

    indexer.configs['TEST_MODE'] = True
    assert indexer.test_response == indexer.search(MyModel, 'test')
| bsd-2-clause |
Laurence11/ProjetSoleil | vendor/doctrine/orm/docs/en/conf.py | 2448 | 6497 | # -*- coding: utf-8 -*-
#
# Doctrine 2 ORM documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 3 18:10:24 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('_exts'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['configurationblock']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Doctrine 2 ORM'
copyright = u'2010-12, Doctrine Project Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2'
# The full version, including alpha/beta/rc tags.
release = '2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'doctrine'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Doctrine2ORMdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Doctrine2ORM.tex', u'Doctrine 2 ORM Documentation',
u'Doctrine Project Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
primary_domain = "dcorm"
def linkcode_resolve(domain, info):
    """Sphinx linkcode hook: return a source URL for objects in the
    'dcorm' domain, or None so no source link is emitted."""
    return 'http://' if domain == 'dcorm' else None
| mit |
minhphung171093/GreenERP_V9 | openerp/addons/website_portal_sale/controllers/main.py | 17 | 2084 | # -*- coding: utf-8 -*-
import datetime
from openerp import http
from openerp.http import request
from openerp.addons.website_portal.controllers.main import website_account
class website_account(website_account):
    """Extends the portal account controller with sales documents
    (quotations, orders, invoices) for the logged-in partner."""

    @http.route(['/my/home'], type='http', auth="user", website=True)
    def account(self, **kw):
        """ Add sales documents to main account page """
        response = super(website_account, self).account()
        partner = request.env.user.partner_id

        SaleOrder = request.env['sale.order']
        Invoice = request.env['account.invoice']

        # Quotations: sent or cancelled; orders: confirmed or done.
        quotations = SaleOrder.search([
            ('partner_id.id', '=', partner.id),
            ('state', 'in', ['sent', 'cancel'])
        ])
        orders = SaleOrder.search([
            ('partner_id.id', '=', partner.id),
            ('state', 'in', ['sale', 'done'])
        ])
        invoices = Invoice.search([
            ('partner_id.id', '=', partner.id),
            ('state', 'in', ['open', 'paid', 'cancelled'])
        ])

        response.qcontext.update({
            'date': datetime.date.today().strftime('%Y-%m-%d'),
            'quotations': quotations,
            'orders': orders,
            'invoices': invoices,
        })
        return response

    @http.route(['/my/orders/<int:order>'], type='http', auth="user", website=True)
    def orders_followup(self, order=None):
        """Render the follow-up page for one of the partner's orders."""
        partner = request.env['res.users'].browse(request.uid).partner_id
        # Restrict to the current partner so users cannot read others' orders.
        sale_order = request.env['sale.order'].search([
            ('partner_id.id', '=', partner.id),
            ('state', 'not in', ['draft', 'cancel']),
            ('id', '=', order)
        ])
        invoiced_lines = request.env['account.invoice.line'].search(
            [('invoice_id', 'in', sale_order.invoice_ids.ids)])
        # Map each invoiced product to the invoice that billed it.
        order_invoice_lines = {line.product_id.id: line.invoice_id
                               for line in invoiced_lines}
        return request.website.render("website_portal_sale.orders_followup", {
            'order': sale_order.sudo(),
            'order_invoice_lines': order_invoice_lines,
        })
| gpl-3.0 |
davidobrien1985/ansible-modules-core | packaging/os/rpm_key.py | 43 | 7357 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Ansible module to import third party repo keys to your rpm db
# (c) 2013, Héctor Acosta <hector.acosta@gazzang.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Ansible module documentation and usage examples (YAML in string form).
# Fixes the "Wheather" -> "Whether" typo in the state option description.
DOCUMENTATION = '''
---
module: rpm_key
author: "Hector Acosta (@hacosta) <hector.acosta@gazzang.com>"
short_description: Adds or removes a gpg key from the rpm db
description:
    - Adds or removes (rpm --import) a gpg key to your rpm database.
version_added: "1.3"
options:
    key:
      required: true
      default: null
      aliases: []
      description:
          - Key that will be modified. Can be a url, a file, or a keyid if the key already exists in the database.
    state:
      required: false
      default: "present"
      choices: [present, absent]
      description:
          - Whether the key will be imported or removed from the rpm db.
    validate_certs:
      description:
          - If C(no) and the C(key) is a url starting with https, SSL certificates will not be validated. This should only be used
            on personally controlled sites using self-signed certificates.
      required: false
      default: 'yes'
      choices: ['yes', 'no']
'''

EXAMPLES = '''
# Example action to import a key from a url
- rpm_key: state=present key=http://apt.sw.be/RPM-GPG-KEY.dag.txt

# Example action to import a key from a file
- rpm_key: state=present key=/path/to/key.gpg

# Example action to ensure a key is not present in the db
- rpm_key: state=absent key=DEADB33F
'''
import re
import os.path
import urllib2
import tempfile
def is_pubkey(string):
    """Verifies if string is a pubkey"""
    # Match an ASCII-armored PGP public key block anywhere in the string;
    # DOTALL lets '.' span the newlines inside the armor.
    armor_pattern = (".*?(-----BEGIN PGP PUBLIC KEY BLOCK-----"
                     ".*?-----END PGP PUBLIC KEY BLOCK-----).*")
    return re.match(armor_pattern, string, re.DOTALL)
class RpmKey:
    """Ensures a gpg key is present in (or absent from) the rpm database.

    The whole workflow runs from __init__ and always terminates the module
    via exit_json/fail_json, following the Ansible module convention.
    """
    def __init__(self, module):
        # If the key is a url, we need to check if it's present to be idempotent,
        # to do that, we need to check the keyid, which we can get from the armor.
        keyfile = None
        should_cleanup_keyfile = False
        self.module = module
        self.rpm = self.module.get_bin_path('rpm', True)
        state = module.params['state']
        key = module.params['key']

        if '://' in key:
            keyfile = self.fetch_key(key)
            keyid = self.getkeyid(keyfile)
            should_cleanup_keyfile = True
        elif self.is_keyid(key):
            keyid = key
        elif os.path.isfile(key):
            keyfile = key
            keyid = self.getkeyid(keyfile)
        else:
            self.module.fail_json(msg="Not a valid key %s" % key)
        keyid = self.normalize_keyid(keyid)

        if state == 'present':
            if self.is_key_imported(keyid):
                module.exit_json(changed=False)
            else:
                if not keyfile:
                    self.module.fail_json(msg="When importing a key, a valid file must be given")
                self.import_key(keyfile, dryrun=module.check_mode)
                if should_cleanup_keyfile:
                    self.module.cleanup(keyfile)
                module.exit_json(changed=True)
        else:
            if self.is_key_imported(keyid):
                self.drop_key(keyid, dryrun=module.check_mode)
                module.exit_json(changed=True)
            else:
                module.exit_json(changed=False)

    def fetch_key(self, url):
        """Downloads a key from url, returns a valid path to a gpg key"""
        try:
            rsp, info = fetch_url(self.module, url)
            key = rsp.read()
            if not is_pubkey(key):
                self.module.fail_json(msg="Not a public key: %s" % url)
            tmpfd, tmpname = tempfile.mkstemp()
            tmpfile = os.fdopen(tmpfd, "w+b")
            tmpfile.write(key)
            tmpfile.close()
            return tmpname
        except urllib2.URLError as e:  # 'as' form: valid on Python 2.6+ and 3
            self.module.fail_json(msg=str(e))

    def normalize_keyid(self, keyid):
        """Ensure a keyid doesn't have a leading 0x, has leading or trailing whitespace, and make sure is lowercase"""
        ret = keyid.strip().lower()
        if ret.startswith('0x'):
            return ret[2:]
        elif ret.startswith('0X'):
            return ret[2:]
        else:
            return ret

    def getkeyid(self, keyfile):
        """Extract the short (8 hex digit) keyid from an armored key file
        using gpg/gpg2; fails the module if neither binary is available."""
        gpg = self.module.get_bin_path('gpg')
        if not gpg:
            gpg = self.module.get_bin_path('gpg2')
        if not gpg:
            # BUGFIX: was self.json_fail(...) -- no such method exists;
            # the failure must go through AnsibleModule.fail_json.
            self.module.fail_json(msg="rpm_key requires a command line gpg or gpg2, none found")

        stdout, stderr = self.execute_command([gpg, '--no-tty', '--batch', '--with-colons', '--fixed-list-mode', '--list-packets', keyfile])
        for line in stdout.splitlines():
            line = line.strip()
            if line.startswith(':signature packet:'):
                # We want just the last 8 characters of the keyid
                keyid = line.split()[-1].strip()[8:]
                return keyid
        # BUGFIX: was self.json_fail(...) here as well.
        self.module.fail_json(msg="Unexpected gpg output")

    def is_keyid(self, keystr):
        """Verifies if a key, as provided by the user is a keyid"""
        return re.match('(0x)?[0-9a-f]{8}', keystr, flags=re.IGNORECASE)

    def execute_command(self, cmd):
        """Run cmd, failing the module on a non-zero exit status."""
        rc, stdout, stderr = self.module.run_command(cmd)
        if rc != 0:
            self.module.fail_json(msg=stderr)
        return stdout, stderr

    def is_key_imported(self, keyid):
        """Return True when keyid is already present in the rpm database."""
        stdout, stderr = self.execute_command([self.rpm, '-qa', 'gpg-pubkey'])
        for line in stdout.splitlines():
            line = line.strip()
            if not line:
                continue
            match = re.match('gpg-pubkey-([0-9a-f]+)-([0-9a-f]+)', line)
            if not match:
                self.module.fail_json(msg="rpm returned unexpected output [%s]" % line)
            else:
                if keyid == match.group(1):
                    return True
        return False

    def import_key(self, keyfile, dryrun=False):
        """rpm --import the key file (no-op in check mode)."""
        if not dryrun:
            self.execute_command([self.rpm, '--import', keyfile])

    def drop_key(self, key, dryrun=False):
        """Erase every matching gpg-pubkey package (no-op in check mode)."""
        if not dryrun:
            self.execute_command([self.rpm, '--erase', '--allmatches', "gpg-pubkey-%s" % key])
def main():
    """Module entry point: parse the declared arguments and hand the
    resulting AnsibleModule to RpmKey, which drives the whole workflow."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            key=dict(required=True, type='str'),
            validate_certs=dict(default='yes', type='bool'),
        ),
        supports_check_mode=True,
    )
    RpmKey(module)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
| gpl-3.0 |
Russell-IO/ansible | lib/ansible/module_utils/six/__init__.py | 60 | 31144 | # This code is strewn with things that are not defined on Python3 (unicode,
# long, etc) but they are all shielded by version checks. This is also an
# upstream vendored file that we're not going to modify on our own
# pylint: disable=undefined-variable
# Copyright (c) 2010-2017 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Utilities for writing code that runs on Python 2 and 3"""
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.11.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
    """Descriptor that resolves its value on first attribute access.

    Subclasses provide _resolve(); the resolved object is cached on the
    accessing instance and the descriptor removes itself from the class so
    later lookups bypass it entirely.  (Vendored six machinery -- left
    byte-for-byte as upstream.)
    """

    def __init__(self, name):
        # Attribute name this descriptor is installed under.
        self.name = name

    def __get__(self, obj, tp):
        result = self._resolve()
        setattr(obj, self.name, result) # Invokes __set__.
        try:
            # This is a bit ugly, but it avoids running this again by
            # removing this descriptor.
            delattr(obj.__class__, self.name)
        except AttributeError:
            pass
        return result
class MovedModule(_LazyDescr):
    """Lazy stand-in for a module that was renamed between Python 2 and 3.

    Stores the Python-2 name (old) and Python-3 name (new, defaulting to
    name) and imports whichever matches the running interpreter only when
    first touched.
    """

    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            if new is None:
                new = name
            self.mod = new
        else:
            self.mod = old

    def _resolve(self):
        # Actually import the target module (see _LazyDescr.__get__).
        return _import_module(self.mod)

    def __getattr__(self, attr):
        _module = self._resolve()
        value = getattr(_module, attr)
        # Cache the attribute on self so later lookups skip __getattr__.
        setattr(self, attr, value)
        return value
class _LazyModule(types.ModuleType):
    """Module whose attributes are _LazyDescr entries resolved on demand."""

    def __init__(self, name):
        super(_LazyModule, self).__init__(name)
        self.__doc__ = self.__class__.__doc__

    def __dir__(self):
        # Advertise the lazily-provided names alongside the basics.
        attrs = ["__doc__", "__name__"]
        attrs += [attr.name for attr in self._moved_attributes]
        return attrs

    # Subclasses should override this
    _moved_attributes = []
class MovedAttribute(_LazyDescr):
    """Lazy reference to an attribute that moved between Python 2 and 3.

    (*old_mod*, *old_attr*) locate it on Python 2 and (*new_mod*, *new_attr*)
    on Python 3; unspecified attribute names fall back to *name* (or, on
    Python 3, to *old_attr* when given).
    """

    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            if new_attr is None:
                if old_attr is None:
                    new_attr = name
                else:
                    new_attr = old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            if old_attr is None:
                old_attr = name
            self.attr = old_attr

    def _resolve(self):
        # Import the owning module, then pull the attribute off it.
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _SixMetaPathImporter(object):

    """
    A meta path importer to import six.moves and its submodules.

    This class implements a PEP302 finder and loader. It should be compatible
    with Python 2.5 and all existing versions of Python3
    """

    def __init__(self, six_module_name):
        # Name of the six module this importer serves (usually "six").
        self.name = six_module_name
        # Maps fully-qualified names to module objects (or MovedModule stubs).
        self.known_modules = {}

    def _add_module(self, mod, *fullnames):
        # Register *mod* under one or more names relative to the six package.
        for fullname in fullnames:
            self.known_modules[self.name + "." + fullname] = mod

    def _get_module(self, fullname):
        return self.known_modules[self.name + "." + fullname]

    def find_module(self, fullname, path=None):
        # PEP 302 finder hook: claim only the modules we registered.
        if fullname in self.known_modules:
            return self
        return None

    def __get_module(self, fullname):
        try:
            return self.known_modules[fullname]
        except KeyError:
            raise ImportError("This loader does not know module " + fullname)

    def load_module(self, fullname):
        # PEP 302 loader hook.
        try:
            # in case of a reload
            return sys.modules[fullname]
        except KeyError:
            pass
        mod = self.__get_module(fullname)
        if isinstance(mod, MovedModule):
            # Resolve the lazy stub to the real (version-appropriate) module.
            mod = mod._resolve()
        else:
            mod.__loader__ = self
        sys.modules[fullname] = mod
        return mod

    def is_package(self, fullname):
        """
        Return true, if the named module is a package.

        We need this method to get correct spec objects with
        Python 3.4 (see PEP451)
        """
        return hasattr(self.__get_module(fullname), "__path__")

    def get_code(self, fullname):
        """Return None

        Required, if is_package is implemented"""
        self.__get_module(fullname)  # eventually raises ImportError
        return None
    get_source = get_code  # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
MovedAttribute("getoutput", "commands", "subprocess"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
_moved_attributes += [
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("splitvalue", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
MovedAttribute("parse_http_list", "urllib2", "urllib.request"),
MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
    """Add an item to six.moves."""
    # *move* is a MovedAttribute or MovedModule; installing it on the class
    # makes it resolve lazily like the built-in moves.
    setattr(_MovedItems, move.name, move)


def remove_move(name):
    """Remove item from six.moves."""
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        # The descriptor may already have resolved and cached itself on the
        # module instance; fall back to deleting the cached value.
        try:
            del moves.__dict__[name]
        except KeyError:
            raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
def create_unbound_method(func, cls):
return func
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
def create_unbound_method(func, cls):
return types.MethodType(func, None, cls)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return d.iterkeys(**kw)
def itervalues(d, **kw):
return d.itervalues(**kw)
def iteritems(d, **kw):
return d.iteritems(**kw)
def iterlists(d, **kw):
return d.iterlists(**kw)
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
import struct
int2byte = struct.Struct(">B").pack
del struct
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
    """Dispatch to the interpreter-appropriate TestCase.assertCountEqual
    (named assertItemsEqual on Python 2)."""
    return getattr(self, _assertCountEqual)(*args, **kwargs)


def assertRaisesRegex(self, *args, **kwargs):
    """Dispatch to TestCase.assertRaisesRegex (assertRaisesRegexp on
    Python 2 / early 3.x)."""
    return getattr(self, _assertRaisesRegex)(*args, **kwargs)


def assertRegex(self, *args, **kwargs):
    """Dispatch to TestCase.assertRegex (assertRegexpMatches on
    Python 2 / early 3.x)."""
    return getattr(self, _assertRegex)(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
try:
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
finally:
value = None
tb = None
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
try:
raise tp, value, tb
finally:
tb = None
""")
if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
try:
if from_value is None:
raise value
raise value from from_value
finally:
value = None
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
try:
raise value from from_value
finally:
value = None
""")
else:
def raise_from(value, from_value):
raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
    # functools.wraps before Python 3.4 does not set __wrapped__ on the
    # wrapper; emulate the newer behavior so introspection works uniformly.
    def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
              updated=functools.WRAPPER_UPDATES):
        def wrapper(f):
            f = functools.wraps(wrapped, assigned, updated)(f)
            f.__wrapped__ = wrapped
            return f
        return wrapper
else:
    wraps = functools.wraps
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass."""
    # This requires a bit of explanation: the basic idea is to make a dummy
    # metaclass for one level of class instantiation that replaces itself with
    # the actual metaclass.
    class metaclass(type):

        def __new__(cls, name, this_bases, d):
            # Ignore this_bases (the temporary class); build the real class
            # from the caller-supplied bases with the real metaclass.
            return meta(name, bases, d)

        @classmethod
        def __prepare__(cls, name, this_bases):
            # Delegate namespace preparation so metaclasses with a custom
            # __prepare__ keep working (Python 3 only honors this hook).
            return meta.__prepare__(name, bases)
    return type.__new__(metaclass, 'temporary_class', (), {})
def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass.

    The decorated class is re-created via *metaclass* with the same name,
    bases and namespace, so the metaclass takes effect on both Python 2
    and Python 3.
    """
    def wrapper(cls):
        body = dict(cls.__dict__)
        declared_slots = body.get('__slots__')
        if declared_slots is not None:
            # Slot descriptors live in the class dict; drop them so the
            # rebuilt class creates fresh ones from __slots__.
            if isinstance(declared_slots, str):
                declared_slots = [declared_slots]
            for slot_name in declared_slots:
                body.pop(slot_name)
        # These are recreated automatically and must not be carried over.
        body.pop('__dict__', None)
        body.pop('__weakref__', None)
        return metaclass(cls.__name__, cls.__bases__, body)
    return wrapper
def python_2_unicode_compatible(klass):
    """Install __unicode__/__str__ on *klass* under Python 2.

    The class must define a __str__ method returning text; under Python 2
    it is moved to __unicode__ and __str__ becomes a UTF-8 encoding shim.
    Under Python 3 the class is returned unchanged.
    """
    if not PY2:
        return klass
    if '__str__' not in klass.__dict__:
        raise ValueError("@python_2_unicode_compatible cannot be applied "
                         "to %s because it doesn't define __str__()." %
                         klass.__name__)
    klass.__unicode__ = klass.__str__
    klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return klass
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
| gpl-3.0 |
AOKP/external_chromium_org | tools/checkbins/checkbins.py | 77 | 3798 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that all EXE and DLL files in the provided directory were built
correctly.
In essense it runs a subset of BinScope tests ensuring that binaries have
/NXCOMPAT, /DYNAMICBASE and /SAFESEH.
"""
import os
import optparse
import sys
# Find /third_party/pefile based on current directory and script path.
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..',
'third_party', 'pefile'))
import pefile
PE_FILE_EXTENSIONS = ['.exe', '.dll']
DYNAMICBASE_FLAG = 0x0040
NXCOMPAT_FLAG = 0x0100
NO_SEH_FLAG = 0x0400
MACHINE_TYPE_AMD64 = 0x8664
# Please do not add your file here without confirming that it indeed doesn't
# require /NXCOMPAT and /DYNAMICBASE. Contact cpu@chromium.org or your local
# Windows guru for advice.
EXCLUDED_FILES = ['chrome_frame_mini_installer.exe',
'mini_installer.exe',
'wow_helper.exe',
'xinput1_3.dll' # Microsoft DirectX redistributable.
]
def IsPEFile(path):
  """Return True if |path| is an existing PE binary we should check.

  A candidate must be a regular file, carry a PE extension (.exe/.dll),
  and not appear on the explicit exclusion list.
  """
  if not os.path.isfile(path):
    return False
  if os.path.splitext(path)[1].lower() not in PE_FILE_EXTENSIONS:
    return False
  return os.path.basename(path) not in EXCLUDED_FILES
def main(options, args):
  """Scan every PE file in the directory given as args[0] and verify that it
  was built with /DYNAMICBASE, /NXCOMPAT and (where required) /SAFESEH.

  Prints a PASS/FAIL line per check (PASS lines only with --verbose) and
  exits with status 1 if any binary fails.
  """
  directory = args[0]
  pe_total = 0   # number of PE files examined
  pe_passed = 0  # number that passed every check
  for file in os.listdir(directory):
    path = os.path.abspath(os.path.join(directory, file))
    if not IsPEFile(path):
      continue
    # fast_load skips most directories; explicitly parse LOAD_CONFIG, which
    # is needed for the /SAFESEH check below.
    pe = pefile.PE(path, fast_load=True)
    pe.parse_data_directories(directories=[
        pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG']])
    pe_total = pe_total + 1
    success = True
    # Check for /DYNAMICBASE.
    if pe.OPTIONAL_HEADER.DllCharacteristics & DYNAMICBASE_FLAG:
      if options.verbose:
        print "Checking %s for /DYNAMICBASE... PASS" % path
    else:
      success = False
      print "Checking %s for /DYNAMICBASE... FAIL" % path
    # Check for /NXCOMPAT.
    if pe.OPTIONAL_HEADER.DllCharacteristics & NXCOMPAT_FLAG:
      if options.verbose:
        print "Checking %s for /NXCOMPAT... PASS" % path
    else:
      success = False
      print "Checking %s for /NXCOMPAT... FAIL" % path
    # Check for /SAFESEH. Binaries should meet one of the following
    # criteria:
    #   1) Have no SEH table as indicated by the DLL characteristics
    #   2) Have a LOAD_CONFIG section containing a valid SEH table
    #   3) Be a 64-bit binary, in which case /SAFESEH isn't required
    #
    # Refer to the following MSDN article for more information:
    # http://msdn.microsoft.com/en-us/library/9a89h429.aspx
    if (pe.OPTIONAL_HEADER.DllCharacteristics & NO_SEH_FLAG or
        (hasattr(pe, "DIRECTORY_ENTRY_LOAD_CONFIG") and
         pe.DIRECTORY_ENTRY_LOAD_CONFIG.struct.SEHandlerCount > 0 and
         pe.DIRECTORY_ENTRY_LOAD_CONFIG.struct.SEHandlerTable != 0) or
        pe.FILE_HEADER.Machine == MACHINE_TYPE_AMD64):
      if options.verbose:
        print "Checking %s for /SAFESEH... PASS" % path
    else:
      success = False
      print "Checking %s for /SAFESEH... FAIL" % path
    # Update tally.
    if success:
      pe_passed = pe_passed + 1
  print "Result: %d files found, %d files passed" % (pe_total, pe_passed)
  # Non-zero exit status signals failure to the build/test harness.
  if pe_passed != pe_total:
    sys.exit(1)
if __name__ == '__main__':
  # Command-line entry point: expects a single DIRECTORY argument and an
  # optional -v/--verbose flag for per-check PASS output.
  usage = "Usage: %prog [options] DIRECTORY"
  option_parser = optparse.OptionParser(usage=usage)
  option_parser.add_option("-v", "--verbose", action="store_true",
                           default=False, help="Print debug logging")
  options, args = option_parser.parse_args()
  if not args:
    # No directory given: show usage and exit cleanly.
    option_parser.print_help()
    sys.exit(0)
  main(options, args)
| bsd-3-clause |
bluevoda/BloggyBlog | lib/python3.4/site-packages/django/contrib/gis/geos/prototypes/errcheck.py | 486 | 2954 | """
Error checking functions for GEOS ctypes prototype functions.
"""
from ctypes import c_void_p, string_at
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.libgeos import GEOSFuncFactory
# Getting the `free` routine used to free the memory allocated for
# string pointers returned by GEOS.
free = GEOSFuncFactory('GEOSFree')
free.argtypes = [c_void_p]
def last_arg_byref(args):
    """Return the value of the final C argument, which was passed byref."""
    byref_arg = args[-1]
    return byref_arg._obj.value
def check_dbl(result, func, cargs):
    """Check the status code; return the byref double on success, else None."""
    # A status code of exactly 1 indicates success.
    if result == 1:
        # The double was passed in by reference as the last argument.
        return last_arg_byref(cargs)
    return None
def check_geom(result, func, cargs):
    """Error checking for routines that return Geometry pointers."""
    if result:
        return result
    # A NULL pointer from GEOS signals an error in the C function.
    raise GEOSException('Error encountered checking Geometry returned from GEOS C function "%s".' % func.__name__)
def check_minus_one(result, func, cargs):
    """Error checking for routines where -1 signals failure."""
    if result != -1:
        return result
    raise GEOSException('Error encountered in GEOS C function "%s".' % func.__name__)
def check_predicate(result, func, cargs):
    """Error checking for unary/binary predicate functions.

    GEOS predicates return a single character: 1 for true, 0 for false,
    anything else indicates an error.
    """
    code = ord(result)  # ordinal of the returned character
    if code == 1:
        return True
    if code == 0:
        return False
    raise GEOSException('Error encountered on GEOS C predicate function "%s".' % func.__name__)
def check_sized_string(result, func, cargs):
    """
    Error checking for routines that return explicitly sized strings.

    This frees the memory allocated by GEOS at the result pointer.
    """
    if not result:
        raise GEOSException('Invalid string pointer returned by GEOS C function "%s"' % func.__name__)
    # The last argument is a c_size_t passed by reference; it carries the
    # length of the buffer GEOS returned, so read exactly that many bytes.
    buffer_size = last_arg_byref(cargs)
    value = string_at(result, buffer_size)
    # Release the GEOS-allocated buffer now that it has been copied.
    free(result)
    return value
def check_string(result, func, cargs):
    """
    Error checking for routines that return NUL-terminated strings.

    This frees the memory allocated by GEOS at the result pointer.
    """
    if not result:
        raise GEOSException('Error encountered checking string return value in GEOS C function "%s".' % func.__name__)
    # Copy the string out of the GEOS-owned buffer...
    value = string_at(result)
    # ...then release that buffer back to GEOS.
    free(result)
    return value
def check_zero(result, func, cargs):
    """Error checking for routines where 0 signals failure."""
    if result == 0:
        raise GEOSException('Error encountered in GEOS C function "%s".' % func.__name__)
    return result
| gpl-3.0 |
abhiQmar/servo | tests/wpt/web-platform-tests/webdriver/support/keys.py | 41 | 17652 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The Keys implementation.
"""
from inspect import getmembers
class Keys(object):
    """
    Set of special keys codes.

    See also https://w3c.github.io/webdriver/webdriver-spec.html#h-keyboard-actions
    """
    NULL = u"\ue000"
    CANCEL = u"\ue001"  # ^break
    HELP = u"\ue002"
    BACKSPACE = u"\ue003"
    TAB = u"\ue004"
    CLEAR = u"\ue005"
    RETURN = u"\ue006"
    ENTER = u"\ue007"
    SHIFT = u"\ue008"
    CONTROL = u"\ue009"
    ALT = u"\ue00a"
    PAUSE = u"\ue00b"
    ESCAPE = u"\ue00c"
    SPACE = u"\ue00d"
    PAGE_UP = u"\ue00e"
    PAGE_DOWN = u"\ue00f"
    END = u"\ue010"
    HOME = u"\ue011"
    LEFT = u"\ue012"
    UP = u"\ue013"
    RIGHT = u"\ue014"
    DOWN = u"\ue015"
    INSERT = u"\ue016"
    DELETE = u"\ue017"
    SEMICOLON = u"\ue018"
    EQUALS = u"\ue019"
    NUMPAD0 = u"\ue01a"  # number pad keys
    NUMPAD1 = u"\ue01b"
    NUMPAD2 = u"\ue01c"
    NUMPAD3 = u"\ue01d"
    NUMPAD4 = u"\ue01e"
    NUMPAD5 = u"\ue01f"
    NUMPAD6 = u"\ue020"
    NUMPAD7 = u"\ue021"
    NUMPAD8 = u"\ue022"
    NUMPAD9 = u"\ue023"
    MULTIPLY = u"\ue024"
    ADD = u"\ue025"
    SEPARATOR = u"\ue026"
    SUBTRACT = u"\ue027"
    DECIMAL = u"\ue028"
    DIVIDE = u"\ue029"
    F1 = u"\ue031"  # function keys
    F2 = u"\ue032"
    F3 = u"\ue033"
    F4 = u"\ue034"
    F5 = u"\ue035"
    F6 = u"\ue036"
    F7 = u"\ue037"
    F8 = u"\ue038"
    F9 = u"\ue039"
    F10 = u"\ue03a"
    F11 = u"\ue03b"
    F12 = u"\ue03c"
    META = u"\ue03d"
    # More keys from webdriver spec
    ZENKAKUHANKAKU = u"\uE040"
    R_SHIFT = u"\uE050"
    R_CONTROL = u"\uE051"
    R_ALT = u"\uE052"
    R_META = u"\uE053"
    R_PAGEUP = u"\uE054"
    R_PAGEDOWN = u"\uE055"
    R_END = u"\uE056"
    R_HOME = u"\uE057"
    R_ARROWLEFT = u"\uE058"
    R_ARROWUP = u"\uE059"
    R_ARROWRIGHT = u"\uE05A"
    R_ARROWDOWN = u"\uE05B"
    R_INSERT = u"\uE05C"
    R_DELETE = u"\uE05D"


# Bug fix: ``unicode`` only exists on Python 2, so the previous predicate
# (``type(x) == unicode``) raised NameError on Python 3.  ``type(u"")`` is
# ``unicode`` on Python 2 and ``str`` on Python 3, so this works on both.
# Dunder attributes (``__doc__`` etc.) are text on Python 3 and must now be
# excluded explicitly; on Python 2 they were byte strings and never matched.
_TEXT_TYPE = type(u"")
ALL_KEYS = [(name, value)
            for name, value in getmembers(Keys, lambda x: isinstance(x, _TEXT_TYPE))
            if not name.startswith("_")]
ALL_EVENTS = {
"ADD": {
"code": "",
"ctrl": False,
"key": "+",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue025",
"which": 0,
},
"ALT": {
"code": "AltLeft",
"ctrl": False,
"key": "Alt",
"location": 1,
"meta": False,
"shift": False,
"value": u"\ue00a",
"which": 0,
},
"BACKSPACE": {
"code": "Backspace",
"ctrl": False,
"key": "Backspace",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue003",
"which": 0,
},
"CANCEL": {
"code": "",
"ctrl": False,
"key": "Cancel",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue001",
"which": 0,
},
"CLEAR": {
"code": "",
"ctrl": False,
"key": "Clear",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue005",
"which": 0,
},
"CONTROL": {
"code": "ControlLeft",
"ctrl": True,
"key": "Control",
"location": 1,
"meta": False,
"shift": False,
"value": u"\ue009",
"which": 0,
},
"DECIMAL": {
"code": "NumpadDecimal",
"ctrl": False,
"key": ".",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue028",
"which": 0,
},
"DELETE": {
"code": "Delete",
"ctrl": False,
"key": "Delete",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue017",
"which": 0,
},
"DIVIDE": {
"code": "NumpadDivide",
"ctrl": False,
"key": "/",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue029",
"which": 0,
},
"DOWN": {
"code": "ArrowDown",
"ctrl": False,
"key": "ArrowDown",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue015",
"which": 0,
},
"END": {
"code": "End",
"ctrl": False,
"key": "End",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue010",
"which": 0,
},
"ENTER": {
"code": "NumpadEnter",
"ctrl": False,
"key": "Enter",
"location": 1,
"meta": False,
"shift": False,
"value": u"\ue007",
"which": 0,
},
"EQUALS": {
"code": "",
"ctrl": False,
"key": "=",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue019",
"which": 0,
},
"ESCAPE": {
"code": "Escape",
"ctrl": False,
"key": "Escape",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue00c",
"which": 0,
},
"F1": {
"code": "F1",
"ctrl": False,
"key": "F1",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue031",
"which": 0,
},
"F10": {
"code": "F10",
"ctrl": False,
"key": "F10",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue03a",
"which": 0,
},
"F11": {
"code": "F11",
"ctrl": False,
"key": "F11",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue03b",
"which": 0,
},
"F12": {
"code": "F12",
"ctrl": False,
"key": "F12",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue03c",
"which": 0,
},
"F2": {
"code": "F2",
"ctrl": False,
"key": "F2",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue032",
"which": 0,
},
"F3": {
"code": "F3",
"ctrl": False,
"key": "F3",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue033",
"which": 0,
},
"F4": {
"code": "F4",
"ctrl": False,
"key": "F4",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue034",
"which": 0,
},
"F5": {
"code": "F5",
"ctrl": False,
"key": "F5",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue035",
"which": 0,
},
"F6": {
"code": "F6",
"ctrl": False,
"key": "F6",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue036",
"which": 0,
},
"F7": {
"code": "F7",
"ctrl": False,
"key": "F7",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue037",
"which": 0,
},
"F8": {
"code": "F8",
"ctrl": False,
"key": "F8",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue038",
"which": 0,
},
"F9": {
"code": "F9",
"ctrl": False,
"key": "F9",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue039",
"which": 0,
},
"HELP": {
"code": "Help",
"ctrl": False,
"key": "Help",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue002",
"which": 0,
},
"HOME": {
"code": "Home",
"ctrl": False,
"key": "Home",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue011",
"which": 0,
},
"INSERT": {
"code": "Insert",
"ctrl": False,
"key": "Insert",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue016",
"which": 0,
},
"LEFT": {
"code": "ArrowLeft",
"ctrl": False,
"key": "ArrowLeft",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue012",
"which": 0,
},
"META": {
"code": "OSLeft",
"ctrl": False,
"key": "Meta",
"location": 1,
"meta": True,
"shift": False,
"value": u"\ue03d",
"which": 0,
},
"MULTIPLY": {
"code": "NumpadMultiply",
"ctrl": False,
"key": "*",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue024",
"which": 0,
},
"NULL": {
"code": "",
"ctrl": False,
"key": "Unidentified",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue000",
"which": 0,
},
"NUMPAD0": {
"code": "Numpad0",
"ctrl": False,
"key": "0",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue01a",
"which": 0,
},
"NUMPAD1": {
"code": "Numpad1",
"ctrl": False,
"key": "1",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue01b",
"which": 0,
},
"NUMPAD2": {
"code": "Numpad2",
"ctrl": False,
"key": "2",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue01c",
"which": 0,
},
"NUMPAD3": {
"code": "Numpad3",
"ctrl": False,
"key": "3",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue01d",
"which": 0,
},
"NUMPAD4": {
"code": "PageDown",
"ctrl": False,
"key": "4",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue01e",
"which": 0,
},
"NUMPAD5": {
"code": "PageUp",
"ctrl": False,
"key": "5",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue01f",
"which": 0,
},
"NUMPAD6": {
"code": "Numpad6",
"ctrl": False,
"key": "6",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue020",
"which": 0,
},
"NUMPAD7": {
"code": "Numpad7",
"ctrl": False,
"key": "7",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue021",
"which": 0,
},
"NUMPAD8": {
"code": "Numpad8",
"ctrl": False,
"key": "8",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue022",
"which": 0,
},
"NUMPAD9": {
"code": "Numpad9",
"ctrl": False,
"key": "9",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue023",
"which": 0,
},
"PAGE_DOWN": {
"code": "",
"ctrl": False,
"key": "PageDown",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue00f",
"which": 0,
},
"PAGE_UP": {
"code": "",
"ctrl": False,
"key": "PageUp",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue00e",
"which": 0,
},
"PAUSE": {
"code": "",
"ctrl": False,
"key": "Pause",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue00b",
"which": 0,
},
"RETURN": {
"code": "Enter",
"ctrl": False,
"key": "Return",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue006",
"which": 0,
},
"RIGHT": {
"code": "ArrowRight",
"ctrl": False,
"key": "ArrowRight",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue014",
"which": 0,
},
"R_ALT": {
"code": "AltRight",
"ctrl": False,
"key": "Alt",
"location": 2,
"meta": False,
"shift": False,
"value": u"\ue052",
"which": 0,
},
"R_ARROWDOWN": {
"code": "Numpad2",
"ctrl": False,
"key": "ArrowDown",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue05b",
"which": 0,
},
"R_ARROWLEFT": {
"code": "Numpad4",
"ctrl": False,
"key": "ArrowLeft",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue058",
"which": 0,
},
"R_ARROWRIGHT": {
"code": "Numpad6",
"ctrl": False,
"key": "ArrowRight",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue05a",
"which": 0,
},
"R_ARROWUP": {
"code": "Numpad8",
"ctrl": False,
"key": "ArrowUp",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue059",
"which": 0,
},
"R_CONTROL": {
"code": "ControlRight",
"ctrl": True,
"key": "Control",
"location": 2,
"meta": False,
"shift": False,
"value": u"\ue051",
"which": 0,
},
"R_DELETE": {
"code": "NumpadDecimal",
"ctrl": False,
"key": "Delete",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue05d",
"which": 0,
},
"R_END": {
"code": "Numpad1",
"ctrl": False,
"key": "End",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue056",
"which": 0,
},
"R_HOME": {
"code": "Numpad7",
"ctrl": False,
"key": "Home",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue057",
"which": 0,
},
"R_INSERT": {
"code": "Numpad0",
"ctrl": False,
"key": "Insert",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue05c",
"which": 0,
},
"R_META": {
"code": "OSRight",
"ctrl": False,
"key": "Meta",
"location": 2,
"meta": True,
"shift": False,
"value": u"\ue053",
"which": 0,
},
"R_PAGEDOWN": {
"code": "Numpad3",
"ctrl": False,
"key": "PageDown",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue055",
"which": 0,
},
"R_PAGEUP": {
"code": "Numpad9",
"ctrl": False,
"key": "PageUp",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue054",
"which": 0,
},
"R_SHIFT": {
"code": "ShiftRight",
"ctrl": False,
"key": "Shift",
"location": 2,
"meta": False,
"shift": True,
"value": u"\ue050",
"which": 0,
},
"SEMICOLON": {
"code": "",
"ctrl": False,
"key": ";",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue018",
"which": 0,
},
"SEPARATOR": {
"code": "NumpadSubtract",
"ctrl": False,
"key": ",",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue026",
"which": 0,
},
"SHIFT": {
"code": "ShiftLeft",
"ctrl": False,
"key": "Shift",
"location": 1,
"meta": False,
"shift": True,
"value": u"\ue008",
"which": 0,
},
"SPACE": {
"code": "Space",
"ctrl": False,
"key": " ",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue00d",
"which": 0,
},
"SUBTRACT": {
"code": "",
"ctrl": False,
"key": "-",
"location": 3,
"meta": False,
"shift": False,
"value": u"\ue027",
"which": 0,
},
"TAB": {
"code": "Tab",
"ctrl": False,
"key": "Tab",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue004",
"which": 0,
},
"UP": {
"code": "ArrowUp",
"ctrl": False,
"key": "ArrowUp",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue013",
"which": 0,
},
"ZENKAKUHANKAKU": {
"code": "",
"ctrl": False,
"key": "ZenkakuHankaku",
"location": 0,
"meta": False,
"shift": False,
"value": u"\ue040",
"which": 0,
}
}
| mpl-2.0 |
azaghal/ansible | lib/ansible/utils/vars.py | 7 | 10177 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import keyword
import random
import uuid
from json import dumps
from ansible import constants as C
from ansible import context
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.module_utils.six import iteritems, string_types, PY3
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.common._collections_compat import MutableMapping, MutableSequence
from ansible.parsing.splitter import parse_kv
ADDITIONAL_PY2_KEYWORDS = frozenset(("True", "False", "None"))

_MAXSIZE = 2 ** 32
cur_id = 0
node_mac = ("%012x" % uuid.getnode())[:12]
random_int = ("%08x" % random.randint(0, _MAXSIZE))[:8]


def get_unique_id():
    """Return a process-unique, UUID-shaped id string.

    The first two groups are derived from the host MAC address and the
    next two from a per-process random value, so ids from different hosts
    or processes are extremely unlikely to collide; the final group is a
    monotonically increasing counter, so ids within one process never do.
    """
    global cur_id
    cur_id += 1
    return "%s-%s-%s-%s-%s" % (
        node_mac[0:8],
        node_mac[8:12],
        random_int[0:4],
        random_int[4:8],
        ("%012x" % cur_id)[:12],
    )
def _validate_mutable_mappings(a, b):
"""
Internal convenience function to ensure arguments are MutableMappings
This checks that all arguments are MutableMappings or raises an error
:raises AnsibleError: if one of the arguments is not a MutableMapping
"""
# If this becomes generally needed, change the signature to operate on
# a variable number of arguments instead.
if not (isinstance(a, MutableMapping) and isinstance(b, MutableMapping)):
myvars = []
for x in [a, b]:
try:
myvars.append(dumps(x))
except Exception:
myvars.append(to_native(x))
raise AnsibleError("failed to combine variables, expected dicts but got a '{0}' and a '{1}': \n{2}\n{3}".format(
a.__class__.__name__, b.__class__.__name__, myvars[0], myvars[1])
)
def combine_vars(a, b):
    """
    Return a copy of dictionaries of variables based on configured hash behavior

    With 'merge' behaviour the two mappings are merged recursively; with the
    default 'replace' behaviour keys from b simply overwrite keys from a.
    Neither input is modified.
    """
    if C.DEFAULT_HASH_BEHAVIOUR == "merge":
        return merge_hash(a, b)
    # HASH_BEHAVIOUR == 'replace'
    _validate_mutable_mappings(a, b)
    combined = a.copy()
    combined.update(b)
    return combined
def merge_hash(x, y, recursive=True, list_merge='replace'):
    """
    Return a new dictionary result of the merges of y into x,
    so that keys from y take precedence over keys from x.
    (x and y aren't modified)

    :arg x: lower-priority mapping (the "default" data being patched)
    :arg y: higher-priority mapping (the "patch")
    :kwarg recursive: when True (default), nested mappings are merged key
        by key; when False, y's nested mappings replace x's wholesale
    :kwarg list_merge: strategy for combining two lists found under the
        same key: 'replace' (default), 'keep', 'append', 'prepend',
        'append_rp' or 'prepend_rp' (the "_rp" variants drop elements of
        x's list that also appear in y's list before concatenating)
    :raises AnsibleError: if list_merge is invalid, or x or y is not a
        MutableMapping
    """
    if list_merge not in ('replace', 'keep', 'append', 'prepend', 'append_rp', 'prepend_rp'):
        raise AnsibleError("merge_hash: 'list_merge' argument can only be equal to 'replace', 'keep', 'append', 'prepend', 'append_rp' or 'prepend_rp'")
    # verify x & y are dicts
    _validate_mutable_mappings(x, y)
    # to speed things up: if x is empty or equal to y, return y
    # (this `if` can be removed without impact on the function
    # except performance)
    if x == {} or x == y:
        return y.copy()
    # in the following we will copy elements from y to x, but
    # we don't want to modify x, so we create a copy of it
    x = x.copy()
    # to speed things up: use dict.update if possible
    # (this `if` can be removed without impact on the function
    # except performance)
    if not recursive and list_merge == 'replace':
        x.update(y)
        return x
    # insert each element of y in x, overriding the one in x
    # (as y has higher priority)
    # we copy elements from y to x instead of x to y because
    # there is a high probability x will be the "default" dict the user
    # want to "patch" with y
    # therefore x will have much more elements than y
    for key, y_value in iteritems(y):
        # if `key` isn't in x
        # update x and move on to the next element of y
        if key not in x:
            x[key] = y_value
            continue
        # from this point we know `key` is in x
        x_value = x[key]
        # if both x's element and y's element are dicts
        # recursively "combine" them or override x's with y's element
        # depending on the `recursive` argument
        # and move on to the next element of y
        if isinstance(x_value, MutableMapping) and isinstance(y_value, MutableMapping):
            if recursive:
                x[key] = merge_hash(x_value, y_value, recursive, list_merge)
            else:
                x[key] = y_value
            continue
        # if both x's element and y's element are lists
        # "merge" them depending on the `list_merge` argument
        # and move on to the next element of y
        if isinstance(x_value, MutableSequence) and isinstance(y_value, MutableSequence):
            if list_merge == 'replace':
                # replace x value by y's one as it has higher priority
                x[key] = y_value
            elif list_merge == 'append':
                x[key] = x_value + y_value
            elif list_merge == 'prepend':
                x[key] = y_value + x_value
            elif list_merge == 'append_rp':
                # append all elements from y_value (high prio) to x_value (low prio)
                # and remove x_value elements that are also in y_value
                # we don't remove elements from x_value nor y_value that were already in double
                # (we assume that there is a reason if there where such double elements)
                # _rp stands for "remove present"
                x[key] = [z for z in x_value if z not in y_value] + y_value
            elif list_merge == 'prepend_rp':
                # same as 'append_rp' but y_value elements are prepend
                x[key] = y_value + [z for z in x_value if z not in y_value]
            # else 'keep'
            #   keep x value even if y it's of higher priority
            #   it's done by not changing x[key]
            continue
        # else just override x's element with y's one
        x[key] = y_value
    return x
def load_extra_vars(loader):
    """
    Build a single dict out of every --extra-vars option on the CLI.

    Each option may be "@filename" (YAML/JSON file), an inline YAML/JSON
    list or mapping, or key=value pairs; later options override earlier
    ones via combine_vars().

    :raises AnsibleOptionsError: for a filename missing its '@' prefix, or
        for data that cannot be turned into a dictionary.
    """
    extra_vars = {}
    for opt in context.CLIARGS.get('extra_vars', tuple()):
        opt = to_text(opt, errors='surrogate_or_strict')
        if not opt:
            continue
        lead = opt[0]
        if lead == u'@':
            # Argument is a YAML file (JSON is a subset of YAML)
            data = loader.load_from_file(opt[1:])
        elif lead in (u'/', u'.'):
            raise AnsibleOptionsError("Please prepend extra_vars filename '%s' with '@'" % opt)
        elif lead in (u'[', u'{'):
            # Arguments as YAML
            data = loader.load(opt)
        else:
            # Arguments as Key-value
            data = parse_kv(opt)
        if not isinstance(data, MutableMapping):
            raise AnsibleOptionsError("Invalid extra vars data supplied. '%s' could not be made into a dictionary" % opt)
        extra_vars = combine_vars(extra_vars, data)
    return extra_vars
def load_options_vars(version):
    """Return a dict of 'ansible_*' magic variables describing the CLI run.

    Only options actually present in context.CLIARGS are included;
    'ansible_version' is always set (to 'Unknown' when version is None).
    """
    options_vars = {'ansible_version': version if version is not None else 'Unknown'}
    # (CLI attribute name, exported variable suffix) pairs.
    attr_aliases = (
        ('check', 'check_mode'),
        ('diff', 'diff_mode'),
        ('forks', 'forks'),
        ('inventory', 'inventory_sources'),
        ('skip_tags', 'skip_tags'),
        ('subset', 'limit'),
        ('tags', 'run_tags'),
        ('verbosity', 'verbosity'),
    )
    for attr, alias in attr_aliases:
        opt = context.CLIARGS.get(attr)
        if opt is not None:
            options_vars['ansible_%s' % alias] = opt
    return options_vars
def _isidentifier_PY3(ident):
    """Python 3 identifier check: text, ASCII-only, valid and not a keyword."""
    if not isinstance(ident, string_types):
        return False
    # NOTE Python 3.7 offers str.isascii() so switch over to using it once
    # we stop supporting 3.5 and 3.6 on the controller
    try:
        # Python 2 does not allow non-ascii characters in identifiers so unify
        # the behavior for Python 3
        ident.encode('ascii')
    except UnicodeEncodeError:
        return False
    return ident.isidentifier() and not keyword.iskeyword(ident)
def _isidentifier_PY2(ident):
    """Python 2 identifier check via regex, keywords, and Py3-only keywords."""
    if not isinstance(ident, string_types):
        return False
    if not ident or C.INVALID_VARIABLE_NAMES.search(ident):
        return False
    # True/False/None only became keywords in Python 3; reject them anyway
    # so behavior matches across interpreters.
    return not (keyword.iskeyword(ident) or ident in ADDITIONAL_PY2_KEYWORDS)
# Bind the version-appropriate implementation once, at import time, so
# callers simply use `isidentifier` on either interpreter.
if PY3:
    isidentifier = _isidentifier_PY3
else:
    isidentifier = _isidentifier_PY2
isidentifier.__doc__ = """Determine if string is valid identifier.

The purpose of this function is to be used to validate any variables created in
a play to be valid Python identifiers and to not conflict with Python keywords
to prevent unexpected behavior. Since Python 2 and Python 3 differ in what
a valid identifier is, this function unifies the validation so playbooks are
portable between the two. The following changes were made:

    * disallow non-ascii characters (Python 3 allows for them as opposed to Python 2)
    * True, False and None are reserved keywords (these are reserved keywords
      on Python 3 as opposed to Python 2)

:arg ident: A text string of identifier to check. Note: It is callers
    responsibility to convert ident to text if it is not already.

Originally posted at http://stackoverflow.com/a/29586366
"""
| gpl-3.0 |
sonali0901/zulip | zerver/tests/test_tornado.py | 9 | 11648 | # -*- coding: utf-8 -*-
"""WebSocketBaseTestCase is based on a combination of the Tornado
and Django test systems. It requires the '@gen.coroutine' decorator
on each test case method (see the documentation:
http://www.tornadoweb.org/en/stable/testing.html), and an implementation of
the 'get_app' method to initialize the Tornado application and launch it.
"""
from __future__ import absolute_import
from __future__ import print_function
import time
import ujson
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from tornado.gen import Return
from tornado.httpclient import HTTPRequest
from zerver.lib.test_helpers import POSTRequestMock
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import UserProfile
from tornado import gen
from tornado.testing import AsyncHTTPTestCase, gen_test
from tornado.web import Application
from tornado.websocket import websocket_connect
from zerver.tornado.application import create_tornado_application
from zerver.tornado.event_queue import fetch_events
from zerver.tornado.views import get_events_backend
from six.moves.http_cookies import SimpleCookie
from typing import Any, Callable, Dict, Generator, Optional
class WebSocketBaseTestCase(AsyncHTTPTestCase, ZulipTestCase):
    def setUp(self):
        # type: () -> None
        # Tornado-served views consult this flag; pretend we are running
        # inside Tornado for the duration of each test.
        settings.RUNNING_INSIDE_TORNADO = True
        super(WebSocketBaseTestCase, self).setUp()

    def tearDown(self):
        # type: () -> None
        # Bug fix: this previously called super().setUp() again, which
        # re-initialized the test case during teardown and skipped the
        # parent classes' cleanup entirely.
        super(WebSocketBaseTestCase, self).tearDown()
        settings.RUNNING_INSIDE_TORNADO = False

    @gen.coroutine
    def ws_connect(self, path, cookie_header, compression_options=None):
        # type: (str, str, Optional[Any]) -> Generator[Any, Callable[[HTTPRequest, Optional[Any]], Any], None]
        """Open a websocket connection to `path`, authenticating via the
        supplied `Cookie` header value."""
        request = HTTPRequest(url='ws://127.0.0.1:%d%s' % (self.get_http_port(), path))
        request.headers.add('Cookie', cookie_header)
        ws = yield websocket_connect(
            request,
            compression_options=compression_options)
        raise gen.Return(ws)

    @gen.coroutine
    def close(self, ws):
        # type: (Any) -> Generator[None, Any, None]
        """Close a websocket connection and wait for the server side.

        If we don't wait here, there are sometimes leak warnings in the
        tests.
        """
        ws.close()
        yield self.close_future
class TornadoTestCase(WebSocketBaseTestCase):
def get_app(self):
# type: () -> Application
""" Return tornado app to launch for test cases
"""
return create_tornado_application()
@staticmethod
def tornado_call(view_func, user_profile, post_data):
# type: (Callable[[HttpRequest, UserProfile], HttpResponse], UserProfile, Dict[str, Any]) -> HttpResponse
request = POSTRequestMock(post_data, user_profile)
return view_func(request, user_profile)
@staticmethod
def get_cookie_header(cookies):
# type: (Dict[Any, Any]) -> str
return ';'.join(
["{}={}".format(name, value.value) for name, value in cookies.items()])
def _get_cookies(self, user_profile):
# type: (UserProfile) -> SimpleCookie
resp = self.login_with_return(user_profile.email)
return resp.cookies
@gen.coroutine
def _websocket_auth(self, ws, queue_events_data, cookies):
# type: (Any, Dict[str, Dict[str, str]], SimpleCookie) -> Generator[str, str, None]
auth_queue_id = ':'.join((queue_events_data['response']['queue_id'], '0'))
message = {
"req_id": auth_queue_id,
"type": "auth",
"request": {
"csrf_token": cookies.get('csrftoken').coded_value,
"queue_id": queue_events_data['response']['queue_id'],
"status_inquiries": []
}
}
auth_frame_str = ujson.dumps(message)
ws.write_message(ujson.dumps([auth_frame_str]))
response_ack = yield ws.read_message()
response_message = yield ws.read_message()
raise gen.Return([response_ack, response_message])
@staticmethod
def _get_queue_events_data(email):
# type: (str) -> Dict[str, Dict[str, str]]
user_profile = UserProfile.objects.filter(email=email).first()
events_query = {
'queue_id': None,
'narrow': [],
'handler_id': 0,
'user_profile_email': user_profile.email,
'all_public_streams': False,
'client_type_name': 'website',
'new_queue_data': {
'apply_markdown': True,
'narrow': [],
'user_profile_email': user_profile.email,
'all_public_streams': False,
'realm_id': user_profile.realm_id,
'client_type_name': 'website',
'event_types': None,
'user_profile_id': user_profile.id,
'queue_timeout': 0,
'last_connection_time': time.time()},
'last_event_id': -1,
'event_types': None,
'user_profile_id': user_profile.id,
'dont_block': True,
'lifespan_secs': 0
}
result = fetch_events(events_query)
return result
def _check_message_sending(self, request_id, ack_resp, msg_resp, profile, queue_events_data):
# type: (str, str, str, UserProfile, Dict[str, Dict[str, str]]) -> None
self.assertEqual(ack_resp[0], 'a')
self.assertEqual(
ujson.loads(ack_resp[1:]),
[
{
"type": "ack",
"req_id": request_id
}
])
self.assertEqual(msg_resp[0], 'a')
result = self.tornado_call(get_events_backend, profile,
{"queue_id": queue_events_data['response']['queue_id'],
"user_client": "website",
"last_event_id": -1,
"dont_block": ujson.dumps(True),
})
result_content = ujson.loads(result.content)
self.assertEqual(len(result_content['events']), 1)
message_id = result_content['events'][0]['message']['id']
self.assertEqual(
ujson.loads(msg_resp[1:]),
[
{
"type": "response",
"response":
{
"result": "success",
"id": message_id,
"msg": ""
},
"req_id": request_id
}
])
@gen_test
def test_tornado_connect(self):
# type: () -> Generator[str, Any, None]
user_profile = UserProfile.objects.filter(email='hamlet@zulip.com').first()
cookies = self._get_cookies(user_profile)
cookie_header = self.get_cookie_header(cookies)
ws = yield self.ws_connect('/sockjs/366/v8nw22qe/websocket', cookie_header=cookie_header)
response = yield ws.read_message()
self.assertEqual(response, 'o')
@gen_test
def test_tornado_auth(self):
# type: () -> Generator[str, TornadoTestCase, None]
user_profile = UserProfile.objects.filter(email='hamlet@zulip.com').first()
cookies = self._get_cookies(user_profile)
cookie_header = self.get_cookie_header(cookies)
ws = yield self.ws_connect('/sockjs/366/v8nw22qe/websocket', cookie_header=cookie_header)
yield ws.read_message()
queue_events_data = self._get_queue_events_data(user_profile.email)
request_id = ':'.join((queue_events_data['response']['queue_id'], '0'))
response = yield self._websocket_auth(ws, queue_events_data, cookies)
self.assertEqual(response[0][0], 'a')
self.assertEqual(
ujson.loads(response[0][1:]),
[
{
"type": "ack",
"req_id": request_id
}
])
self.assertEqual(response[1][0], 'a')
self.assertEqual(
ujson.loads(response[1][1:]),
[
{"req_id": request_id,
"response": {
"result": "success",
"status_inquiries": {},
"msg": ""
},
"type": "response"}
])
@gen_test
def test_sending_private_message(self):
# type: () -> Generator[str, Any, None]
user_profile = UserProfile.objects.filter(email='hamlet@zulip.com').first()
cookies = self._get_cookies(user_profile)
cookie_header = self.get_cookie_header(cookies)
queue_events_data = self._get_queue_events_data(user_profile.email)
ws = yield self.ws_connect('/sockjs/366/v8nw22qe/websocket', cookie_header=cookie_header)
yield ws.read_message()
yield self._websocket_auth(ws, queue_events_data, cookies)
request_id = ':'.join((queue_events_data['response']['queue_id'], '1'))
user_message = {
"req_id": request_id,
"type": "request",
"request": {
"client": "website",
"type": "private",
"subject": "(no topic)",
"stream": "",
"private_message_recipient": "othello@zulip.com",
"content": "hello",
"sender_id": user_profile.id,
"queue_id": queue_events_data['response']['queue_id'],
"to": ujson.dumps(["othello@zulip.com"]),
"reply_to": "hamlet@zulip.com",
"local_id": -1
}
}
user_message_str = ujson.dumps(user_message)
ws.write_message(ujson.dumps([user_message_str]))
ack_resp = yield ws.read_message()
msg_resp = yield ws.read_message()
self._check_message_sending(request_id, ack_resp, msg_resp, user_profile, queue_events_data)
@gen_test
def test_sending_stream_message(self):
# type: () -> Generator[str, Any, None]
user_profile = UserProfile.objects.filter(email='hamlet@zulip.com').first()
cookies = self._get_cookies(user_profile)
cookie_header = self.get_cookie_header(cookies)
queue_events_data = self._get_queue_events_data(user_profile.email)
ws = yield self.ws_connect('/sockjs/366/v8nw22qe/websocket', cookie_header=cookie_header)
yield ws.read_message()
yield self._websocket_auth(ws, queue_events_data, cookies)
request_id = ':'.join((queue_events_data['response']['queue_id'], '1'))
user_message = {
"req_id": request_id,
"type": "request",
"request": {
"client": "website",
"type": "stream",
"subject": "Stream message",
"stream": "Denmark",
"private_message_recipient": "",
"content": "hello",
"sender_id": user_profile.id,
"queue_id": queue_events_data['response']['queue_id'],
"to": ujson.dumps(["Denmark"]),
"reply_to": "hamlet@zulip.com",
"local_id": -1
}
}
user_message_str = ujson.dumps(user_message)
ws.write_message(ujson.dumps([user_message_str]))
ack_resp = yield ws.read_message()
msg_resp = yield ws.read_message()
self._check_message_sending(request_id, ack_resp, msg_resp, user_profile, queue_events_data)
| apache-2.0 |
Automatic/paramiko | paramiko/compress.py | 62 | 1245 | # Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Compression implementations for a Transport.
"""
import zlib
class ZlibCompressor (object):
    """Stateful zlib compressor used as a callable, one chunk per call."""

    def __init__(self):
        # Level 9 = maximum compression, matching the original behavior.
        self.z = zlib.compressobj(9)

    def __call__(self, data):
        # A full flush ends the output on a byte boundary, so each returned
        # chunk can be decompressed immediately by the peer.
        compressed = self.z.compress(data)
        return compressed + self.z.flush(zlib.Z_FULL_FLUSH)
class ZlibDecompressor (object):
    """Stateful zlib decompressor used as a callable, one chunk per call."""

    def __init__(self):
        self.z = zlib.decompressobj()

    def __call__(self, data):
        # Each call feeds the next piece of the single long zlib stream.
        return self.z.decompress(data)
| lgpl-2.1 |
crodrigues96/mtasa-blue | vendor/google-breakpad/src/tools/gyp/test/mac/gyptest-depend-on-bundle.py | 303 | 1186 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that a dependency on a bundle causes the whole bundle to be built.
"""
import TestGyp
import sys
# Framework bundles only exist on macOS; on other platforms this test is a
# silent no-op (pass_test() is never reached).
if sys.platform == 'darwin':
  test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
  test.run_gyp('test.gyp', chdir='depend-on-bundle')
  # Build only the dependent executable; the bundle must be produced solely
  # through the dependency edge declared in test.gyp.
  test.build('test.gyp', 'dependent_on_bundle', chdir='depend-on-bundle')
  # Binary itself.
  test.built_file_must_exist('dependent_on_bundle', chdir='depend-on-bundle')
  # Bundle dependency.
  test.built_file_must_exist(
      'my_bundle.framework/Versions/A/my_bundle',
      chdir='depend-on-bundle')
  test.built_file_must_exist(  # package_framework
      'my_bundle.framework/my_bundle',
      chdir='depend-on-bundle')
  test.built_file_must_exist(  # plist
      'my_bundle.framework/Versions/A/Resources/Info.plist',
      chdir='depend-on-bundle')
  test.built_file_must_exist(
      'my_bundle.framework/Versions/A/Resources/English.lproj/'  # Resources
      'InfoPlist.strings',
      chdir='depend-on-bundle')
  test.pass_test()
| gpl-3.0 |
newsters/coinrail | balance.py | 1 | 1155 | """
잔고확인 (check account balance via the Coinrail private API)
"""
import base64
import simplejson as json
import hashlib
import hmac
import httplib2
import time
ACCESS_KEY = ''
SECRET_KEY = ''
def get_encoded_payload(payload):
    """Return the payload dict as base64-encoded JSON bytes.

    The JSON text is encoded to UTF-8 explicitly: base64.b64encode()
    requires bytes on Python 3, and on Python 2 the encode is a no-op
    for the ASCII output json.dumps() produces by default.
    """
    dumped_json = json.dumps(payload)
    return base64.b64encode(dumped_json.encode('utf-8'))
def get_signature(encoded_payload, secret_key):
    """Return the hex HMAC-SHA512 of *encoded_payload* keyed by *secret_key*.

    Accepts text or bytes for both arguments. The original cast both through
    str(), which only produced the bytes hmac requires on Python 2; on
    Python 3 hmac.new raises TypeError for str input.
    """
    if isinstance(secret_key, str):
        secret_key = secret_key.encode('utf-8')
    if isinstance(encoded_payload, str):
        encoded_payload = encoded_payload.encode('utf-8')
    signature = hmac.new(secret_key, encoded_payload, hashlib.sha512)
    return signature.hexdigest()
def get_response(url, payload):
    """POST the signed *payload* to *url* and return the raw response body."""
    body = get_encoded_payload(payload)
    request_headers = {
        'content-type': 'application/json',
        'X-COINRAIL-PAYLOAD': body,
        'X-COINRAIL-SIGNATURE': get_signature(body, SECRET_KEY)
    }
    # The status object from httplib2 is deliberately discarded; callers
    # parse the JSON body instead.
    _, content = httplib2.Http().request(
        url, 'POST', headers=request_headers, body=body)
    return content
def get_balance():
    """Fetch the account balance from the Coinrail private API.

    Returns the decoded JSON response as a dict. Requires ACCESS_KEY and
    SECRET_KEY to be filled in at the top of the module.
    """
    url = 'https://api.coinrail.co.kr/balance'
    payload = {
        "access_key": ACCESS_KEY,
        # Millisecond timestamp; the API rejects stale or reused requests.
        "timestamp": int(round(time.time() * 1000))
    }
    response = get_response(url, payload)
    # Echo the raw body for debugging; print() syntax is valid on both
    # Python 2 and Python 3 (the original bare print statement was Py2-only).
    print(response)
    content = json.loads(response)
    return content
if __name__ == "__main__":
    # print() call syntax works on both Python 2 and Python 3; the original
    # bare print statement was a SyntaxError under Python 3.
    print(get_balance())
tnkteja/myhelp | virtualEnvironment/lib/python2.7/site.py | 784 | 27543 | """Append module search paths for third-party packages to sys.path.
****************************************************************
* This module is automatically imported during initialization. *
****************************************************************
In earlier versions of Python (up to 1.5a3), scripts or modules that
needed to use site-specific modules would place ``import site''
somewhere near the top of their code. Because of the automatic
import, this is no longer necessary (but code that does it still
works).
This will append site-specific paths to the module search path. On
Unix, it starts with sys.prefix and sys.exec_prefix (if different) and
appends lib/python<version>/site-packages as well as lib/site-python.
It also supports the Debian convention of
lib/python<version>/dist-packages. On other platforms (mainly Mac and
Windows), it uses just sys.prefix (and sys.exec_prefix, if different,
but this is unlikely). The resulting directories, if they exist, are
appended to sys.path, and also inspected for path configuration files.
FOR DEBIAN, this sys.path is augmented with directories in /usr/local.
Local addons go into /usr/local/lib/python<version>/site-packages
(resp. /usr/local/lib/site-python), Debian addons install into
/usr/{lib,share}/python<version>/dist-packages.
A path configuration file is a file whose name has the form
<package>.pth; its contents are additional directories (one per line)
to be added to sys.path. Non-existing directories (or
non-directories) are never added to sys.path; no directory is added to
sys.path more than once. Blank lines and lines beginning with
'#' are skipped. Lines starting with 'import' are executed.
For example, suppose sys.prefix and sys.exec_prefix are set to
/usr/local and there is a directory /usr/local/lib/python2.X/site-packages
with three subdirectories, foo, bar and spam, and two path
configuration files, foo.pth and bar.pth. Assume foo.pth contains the
following:
# foo package configuration
foo
bar
bletch
and bar.pth contains:
# bar package configuration
bar
Then the following directories are added to sys.path, in this order:
/usr/local/lib/python2.X/site-packages/bar
/usr/local/lib/python2.X/site-packages/foo
Note that bletch is omitted because it doesn't exist; bar precedes foo
because bar.pth comes alphabetically before foo.pth; and spam is
omitted because it is not mentioned in either path configuration file.
After these path manipulations, an attempt is made to import a module
named sitecustomize, which can perform arbitrary additional
site-specific customizations. If this import fails with an
ImportError exception, it is silently ignored.
"""
import sys
import os
try:
import __builtin__ as builtins
except ImportError:
import builtins
try:
set
except NameError:
from sets import Set as set
# Prefixes for site-packages; add additional prefixes like /usr/local here
PREFIXES = [sys.prefix, sys.exec_prefix]
# Enable per user site-packages directory
# set it to False to disable the feature or True to force the feature
ENABLE_USER_SITE = None
# for distutils.commands.install
USER_SITE = None
USER_BASE = None
_is_64bit = (getattr(sys, 'maxsize', None) or getattr(sys, 'maxint')) > 2**32
_is_pypy = hasattr(sys, 'pypy_version_info')
_is_jython = sys.platform[:4] == 'java'
if _is_jython:
ModuleType = type(os)
def makepath(*paths):
    """Join *paths* and return ``(absolute_path, comparison_key)``.

    The second element is the case-normalized form used for duplicate
    detection on case-insensitive filesystems.
    """
    joined = os.path.join(*paths)
    # Jython's synthetic classpath entries are not real filesystem paths;
    # return them untouched so they keep working as sys.path markers.
    if _is_jython and (joined == '__classpath__'
                       or joined.startswith('__pyclasspath__')):
        return joined, joined
    absolute = os.path.abspath(joined)
    return absolute, os.path.normcase(absolute)
def abs__file__():
    """Set all module' __file__ attribute to an absolute path"""
    for module in sys.modules.values():
        # Jython: only real module objects carry a meaningful __file__.
        # Everywhere: a PEP 302 loader owns __file__, so leave it alone.
        if ((_is_jython and not isinstance(module, ModuleType)) or
                hasattr(module, '__loader__')):
            continue
        path = getattr(module, '__file__', None)
        if path is not None:
            module.__file__ = os.path.abspath(path)
def removeduppaths():
    """ Remove duplicate entries from sys.path along with making them
    absolute"""
    # The initial path supplied by the interpreter may contain relative
    # entries (e.g. when running from the build directory); makepath
    # absolutizes them so they collapse onto their absolute twins.
    deduped = []
    seen = set()
    for entry in sys.path:
        entry, entry_key = makepath(entry)
        # entry_key is case-normalized, so case-insensitive filesystems
        # also deduplicate entries differing only in case.
        if entry_key not in seen:
            deduped.append(entry)
            seen.add(entry_key)
    sys.path[:] = deduped
    return seen
# XXX This should not be part of site.py, since it is needed even when
# using the -S option for Python. See http://www.python.org/sf/586680
def addbuilddir():
    """Append ./build/lib.<platform> in case we're running in the build dir
    (especially for Guido :-)"""
    from distutils.util import get_platform
    suffix = "build/lib.%s-%.3s" % (get_platform(), sys.version)
    # --with-pydebug interpreters build into a '-pydebug' directory.
    if hasattr(sys, 'gettotalrefcount'):
        suffix += '-pydebug'
    build_dir = os.path.join(os.path.dirname(sys.path[-1]), suffix)
    sys.path.append(build_dir)
def _init_pathinfo():
    """Return a set containing all existing directory entries from sys.path"""
    existing = set()
    for entry in sys.path:
        try:
            if os.path.isdir(entry):
                _, entry_key = makepath(entry)
                existing.add(entry_key)
        except TypeError:
            # Non-string entries (e.g. importer objects) cannot be paths.
            continue
    return existing
def addpackage(sitedir, name, known_paths):
    """Add a new path to known_paths by combining sitedir and 'name' or execute
    sitedir if it starts with 'import'

    Returns the (possibly updated) known_paths set, or None when this call
    built its own set (reset semantics preserved from the original).
    """
    if known_paths is None:
        # Build the dedup set ourselves.  The original discarded the return
        # value of _init_pathinfo(), which left known_paths as None and made
        # any direct call (known_paths=None) crash on the membership test
        # below ("x in None" raises TypeError).
        known_paths = _init_pathinfo()
        reset = 1
    else:
        reset = 0
    fullname = os.path.join(sitedir, name)
    try:
        f = open(fullname, "rU")
    except IOError:
        # Unreadable .pth files are silently skipped, matching CPython.
        return
    try:
        for line in f:
            if line.startswith("#"):
                continue  # comment line
            if line.startswith("import"):
                # Executable .pth line (used e.g. by setuptools hooks).
                exec(line)
                continue
            line = line.rstrip()
            dir, dircase = makepath(sitedir, line)
            # Only add real, not-yet-seen directories.
            if not dircase in known_paths and os.path.exists(dir):
                sys.path.append(dir)
                known_paths.add(dircase)
    finally:
        f.close()
    if reset:
        known_paths = None
    return known_paths
def addsitedir(sitedir, known_paths=None):
    """Add 'sitedir' argument to sys.path if missing and handle .pth files in
    'sitedir'"""
    reset = 0
    if known_paths is None:
        known_paths = _init_pathinfo()
        reset = 1
    sitedir, sitedircase = makepath(sitedir)
    if sitedircase not in known_paths:
        # Add the directory itself before scanning it for .pth files.
        sys.path.append(sitedir)
    try:
        names = os.listdir(sitedir)
    except os.error:
        return
    # Deterministic processing order: foo.pth is handled before zzz.pth.
    for name in sorted(names):
        if name.endswith(os.extsep + "pth"):
            addpackage(sitedir, name, known_paths)
    if reset:
        known_paths = None
    return known_paths
def addsitepackages(known_paths, sys_prefix=sys.prefix, exec_prefix=sys.exec_prefix):
    """Add site-packages (and possibly site-python) to sys.path.

    The prefixes default to the running interpreter's, but the virtualenv
    machinery also calls this with sys.real_prefix to pull in the global
    site-packages.  Layout is platform- and implementation-specific.
    """
    prefixes = [os.path.join(sys_prefix, "local"), sys_prefix]
    if exec_prefix != sys_prefix:
        prefixes.append(os.path.join(exec_prefix, "local"))

    for prefix in prefixes:
        if prefix:
            if sys.platform in ('os2emx', 'riscos') or _is_jython:
                sitedirs = [os.path.join(prefix, "Lib", "site-packages")]
            elif _is_pypy:
                sitedirs = [os.path.join(prefix, 'site-packages')]
            elif sys.platform == 'darwin' and prefix == sys_prefix:
                if prefix.startswith("/System/Library/Frameworks/"):  # Apple's Python
                    sitedirs = [os.path.join("/Library/Python", sys.version[:3], "site-packages"),
                                os.path.join(prefix, "Extras", "lib", "python")]
                else:  # any other Python distros on OSX work this way
                    sitedirs = [os.path.join(prefix, "lib",
                                             "python" + sys.version[:3], "site-packages")]
            elif os.sep == '/':
                # Generic POSIX layout, plus Debian/Fedora variations below.
                sitedirs = [os.path.join(prefix,
                                         "lib",
                                         "python" + sys.version[:3],
                                         "site-packages"),
                            os.path.join(prefix, "lib", "site-python"),
                            os.path.join(prefix, "python" + sys.version[:3], "lib-dynload")]
                # lib64 (Fedora/SUSE): prepend on 64-bit builds so native
                # extensions win, append otherwise.
                lib64_dir = os.path.join(prefix, "lib64", "python" + sys.version[:3], "site-packages")
                if (os.path.exists(lib64_dir) and
                    os.path.realpath(lib64_dir) not in [os.path.realpath(p) for p in sitedirs]):
                    if _is_64bit:
                        sitedirs.insert(0, lib64_dir)
                    else:
                        sitedirs.append(lib64_dir)
                try:
                    # sys.getobjects only available in --with-pydebug build
                    sys.getobjects
                    sitedirs.insert(0, os.path.join(sitedirs[0], 'debug'))
                except AttributeError:
                    pass
                # Debian-specific dist-packages directories:
                sitedirs.append(os.path.join(prefix, "local/lib",
                                             "python" + sys.version[:3],
                                             "dist-packages"))
                if sys.version[0] == '2':
                    sitedirs.append(os.path.join(prefix, "lib",
                                                 "python" + sys.version[:3],
                                                 "dist-packages"))
                else:
                    sitedirs.append(os.path.join(prefix, "lib",
                                                 "python" + sys.version[0],
                                                 "dist-packages"))
                sitedirs.append(os.path.join(prefix, "lib", "dist-python"))
            else:
                sitedirs = [prefix, os.path.join(prefix, "lib", "site-packages")]
            if sys.platform == 'darwin':
                # for framework builds *only* we add the standard Apple
                # locations. Currently only per-user, but /Library and
                # /Network/Library could be added too
                if 'Python.framework' in prefix:
                    home = os.environ.get('HOME')
                    if home:
                        sitedirs.append(
                            os.path.join(home,
                                         'Library',
                                         'Python',
                                         sys.version[:3],
                                         'site-packages'))
            # Only directories that actually exist are added (addsitedir
            # also processes any .pth files found inside them).
            for sitedir in sitedirs:
                if os.path.isdir(sitedir):
                    addsitedir(sitedir, known_paths)
    return None
def check_enableusersite():
    """Check if user site directory is safe for inclusion

    The function tests for the command line flag (including environment var),
    process uid/gid equal to effective uid/gid.

    None: Disabled for security reasons
    False: Disabled by user (command line option)
    True: Safe and enabled
    """
    flags = getattr(sys, 'flags', None)
    if flags is not None and getattr(flags, 'no_user_site', False):
        return False

    # Refuse under setuid/setgid execution: importing user-writable code
    # into a privileged process would be a security hole.
    if hasattr(os, "getuid") and hasattr(os, "geteuid"):
        if os.geteuid() != os.getuid():
            return None
    if hasattr(os, "getgid") and hasattr(os, "getegid"):
        if os.getegid() != os.getgid():
            return None

    return True
def addusersitepackages(known_paths):
    """Add a per user site-package to sys.path

    Each user has its own python directory with site-packages in the
    home directory.

    USER_BASE is the root directory for all Python versions
    USER_SITE is the user specific site-packages directory
    USER_SITE/.. can be used for data.
    """
    # NOTE: mutates the module-level globals as a side effect; distutils'
    # install command reads USER_BASE/USER_SITE from here.
    global USER_BASE, USER_SITE, ENABLE_USER_SITE
    env_base = os.environ.get("PYTHONUSERBASE", None)

    def joinuser(*args):
        return os.path.expanduser(os.path.join(*args))

    #if sys.platform in ('os2emx', 'riscos'):
    #    # Don't know what to put here
    #    USER_BASE = ''
    #    USER_SITE = ''
    if os.name == "nt":
        # Windows keeps per-user packages under %APPDATA%\Python.
        base = os.environ.get("APPDATA") or "~"
        if env_base:
            USER_BASE = env_base
        else:
            USER_BASE = joinuser(base, "Python")
        USER_SITE = os.path.join(USER_BASE,
                                 "Python" + sys.version[0] + sys.version[2],
                                 "site-packages")
    else:
        # POSIX default per PEP 370: ~/.local.
        if env_base:
            USER_BASE = env_base
        else:
            USER_BASE = joinuser("~", ".local")
        USER_SITE = os.path.join(USER_BASE, "lib",
                                 "python" + sys.version[:3],
                                 "site-packages")

    if ENABLE_USER_SITE and os.path.isdir(USER_SITE):
        addsitedir(USER_SITE, known_paths)
    if ENABLE_USER_SITE:
        # Debian also honors per-user dist-packages directories.
        for dist_libdir in ("lib", "local/lib"):
            user_site = os.path.join(USER_BASE, dist_libdir,
                                     "python" + sys.version[:3],
                                     "dist-packages")
            if os.path.isdir(user_site):
                addsitedir(user_site, known_paths)
    return known_paths
def setBEGINLIBPATH():
    """The OS/2 EMX port has optional extension modules that do double duty
    as DLLs (and must use the .DLL file extension) for other extensions.
    The library search path needs to be amended so these will be found
    during module import. Use BEGINLIBPATH so that these are at the start
    of the library search path.
    """
    dll_dir = os.path.join(sys.prefix, "Lib", "lib-dynload")
    entries = os.environ['BEGINLIBPATH'].split(';')
    if entries[-1]:
        # No trailing ';' -> append a new entry.
        entries.append(dll_dir)
    else:
        # Trailing ';' left an empty last element -> reuse that slot.
        entries[-1] = dll_dir
    os.environ['BEGINLIBPATH'] = ';'.join(entries)
def setquit():
    """Define new built-ins 'quit' and 'exit'.

    These are simply strings that display a hint on how to exit.
    """
    # Pick the EOF key combination for the host platform; it is baked into
    # the repr of the quit/exit objects below.
    if os.sep == ':':
        eof = 'Cmd-Q'                  # classic MacOS
    elif os.sep == '\\':
        eof = 'Ctrl-Z plus Return'     # Windows
    else:
        eof = 'Ctrl-D (i.e. EOF)'      # POSIX

    class Quitter(object):
        # Object whose repr is an exit hint; calling it actually exits.
        def __init__(self, name):
            self.name = name
        def __repr__(self):
            return 'Use %s() or %s to exit' % (self.name, eof)
        def __call__(self, code=None):
            # Shells like IDLE catch the SystemExit, but listen when their
            # stdin wrapper is closed.
            try:
                sys.stdin.close()
            except:
                pass
            raise SystemExit(code)
    builtins.quit = Quitter('quit')
    builtins.exit = Quitter('exit')
class _Printer(object):
    """interactive prompt objects for printing the license text, a list of
    contributors and the copyright notice."""

    # Page size for the interactive pager in __call__.
    MAXLINES = 23

    def __init__(self, name, data, files=(), dirs=()):
        # name: built-in name ('copyright', 'credits', 'license')
        # data: fallback text used when none of files/dirs is readable
        self.__name = name
        self.__data = data
        self.__files = files
        self.__dirs = dirs
        self.__lines = None  # lazily populated by __setup()

    def __setup(self):
        # Lazily load the text: first readable file from dirs x files wins,
        # otherwise fall back to the built-in data string.
        if self.__lines:
            return
        data = None
        for dir in self.__dirs:
            for filename in self.__files:
                filename = os.path.join(dir, filename)
                try:
                    fp = open(filename, "rU")
                    data = fp.read()
                    fp.close()
                    break
                except IOError:
                    pass
            if data:
                break
        if not data:
            data = self.__data
        self.__lines = data.split('\n')
        self.__linecnt = len(self.__lines)

    def __repr__(self):
        # Short texts are shown inline; long ones only print a hint.
        self.__setup()
        if len(self.__lines) <= self.MAXLINES:
            return "\n".join(self.__lines)
        else:
            return "Type %s() to see the full %s text" % ((self.__name,)*2)

    def __call__(self):
        # Simple interactive pager: MAXLINES lines per page, 'q' to quit.
        self.__setup()
        prompt = 'Hit Return for more, or q (and Return) to quit: '
        lineno = 0
        while 1:
            try:
                for i in range(lineno, lineno + self.MAXLINES):
                    print(self.__lines[i])
            except IndexError:
                break
            else:
                lineno += self.MAXLINES
                key = None
                while key is None:
                    try:
                        # raw_input on Python 2, input on Python 3.
                        key = raw_input(prompt)
                    except NameError:
                        key = input(prompt)
                    if key not in ('', 'q'):
                        key = None
                if key == 'q':
                    break
def setcopyright():
    """Set 'copyright' and 'credits' in __builtin__"""
    builtins.copyright = _Printer("copyright", sys.copyright)
    # Credits text differs per interpreter implementation.
    if _is_jython:
        builtins.credits = _Printer(
            "credits",
            "Jython is maintained by the Jython developers (www.jython.org).")
    elif _is_pypy:
        builtins.credits = _Printer(
            "credits",
            "PyPy is maintained by the PyPy developers: http://pypy.org/")
    else:
        builtins.credits = _Printer("credits", """\
    Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
    for supporting Python development. See www.python.org for more information.""")
    # license() pages the on-disk LICENSE file when present, falling back to
    # a pointer at python.org.
    here = os.path.dirname(os.__file__)
    builtins.license = _Printer(
        "license", "See http://www.python.org/%.3s/license.html" % sys.version,
        ["LICENSE.txt", "LICENSE"],
        [os.path.join(here, os.pardir), here, os.curdir])
class _Helper(object):
"""Define the built-in 'help'.
This is a wrapper around pydoc.help (with a twist).
"""
def __repr__(self):
return "Type help() for interactive help, " \
"or help(object) for help about object."
def __call__(self, *args, **kwds):
import pydoc
return pydoc.help(*args, **kwds)
def sethelper():
    # Install the interactive help() built-in (wraps pydoc lazily).
    builtins.help = _Helper()
def aliasmbcs():
    """On Windows, some default encodings are not provided by Python,
    while they are always available as "mbcs" in each locale. Make
    them usable by aliasing to "mbcs" in such a case."""
    if sys.platform == 'win32':
        import locale, codecs
        enc = locale.getdefaultlocale()[1]
        # getdefaultlocale() can return (None, None); guard before testing
        # the prefix (the original crashed with AttributeError on None).
        if enc and enc.startswith('cp'):            # "cp***" ?
            try:
                codecs.lookup(enc)
            except LookupError:
                # Unknown codepage: alias it to mbcs so it still decodes.
                import encodings
                encodings._cache[enc] = encodings._unknown
                encodings.aliases.aliases[enc] = 'mbcs'
def setencoding():
    """Set the string encoding used by the Unicode implementation. The
    default is 'ascii', but if you're willing to experiment, you can
    change this."""
    # NOTE: the two "if 0:" branches below are deliberately disabled
    # experiments kept from CPython; flip to 1 to try them.
    encoding = "ascii"  # Default value set by _PyUnicode_Init()
    if 0:
        # Enable to support locale aware default string encodings.
        import locale
        loc = locale.getdefaultlocale()
        if loc[1]:
            encoding = loc[1]
    if 0:
        # Enable to switch off string to Unicode coercion and implicit
        # Unicode to string conversion.
        encoding = "undefined"
    if encoding != "ascii":
        # On Non-Unicode builds this will raise an AttributeError...
        sys.setdefaultencoding(encoding)  # Needs Python Unicode build !
def execsitecustomize():
    """Run custom site specific code, if available."""
    try:
        import sitecustomize
    except ImportError:
        # No sitecustomize module installed; that is perfectly fine.
        pass
def virtual_install_main_packages():
    """Add the real (pre-virtualenv) interpreter's stdlib to sys.path.

    Reads the base interpreter's prefix from orig-prefix.txt (written next
    to this file by virtualenv at creation time), records it as
    sys.real_prefix, and appends the base stdlib directories with
    platform/implementation specific layout handling.
    """
    f = open(os.path.join(os.path.dirname(__file__), 'orig-prefix.txt'))
    sys.real_prefix = f.read().strip()
    f.close()
    # pos is retained from older versions that inserted rather than
    # appended; paths are appended at the end of this function.
    pos = 2
    hardcoded_relative_dirs = []
    if sys.path[0] == '':
        pos += 1
    if _is_jython:
        paths = [os.path.join(sys.real_prefix, 'Lib')]
    elif _is_pypy:
        # PyPy's lib-python directory naming changed across releases.
        if sys.version_info > (3, 2):
            cpyver = '%d' % sys.version_info[0]
        elif sys.pypy_version_info >= (1, 5):
            cpyver = '%d.%d' % sys.version_info[:2]
        else:
            cpyver = '%d.%d.%d' % sys.version_info[:3]
        paths = [os.path.join(sys.real_prefix, 'lib_pypy'),
                 os.path.join(sys.real_prefix, 'lib-python', cpyver)]
        if sys.pypy_version_info < (1, 9):
            paths.insert(1, os.path.join(sys.real_prefix,
                                         'lib-python', 'modified-%s' % cpyver))
        hardcoded_relative_dirs = paths[:]  # for the special 'darwin' case below
        #
        # This is hardcoded in the Python executable, but relative to sys.prefix:
        for path in paths[:]:
            plat_path = os.path.join(path, 'plat-%s' % sys.platform)
            if os.path.exists(plat_path):
                paths.append(plat_path)
    elif sys.platform == 'win32':
        paths = [os.path.join(sys.real_prefix, 'Lib'), os.path.join(sys.real_prefix, 'DLLs')]
    else:
        paths = [os.path.join(sys.real_prefix, 'lib', 'python'+sys.version[:3])]
        hardcoded_relative_dirs = paths[:]  # for the special 'darwin' case below
        lib64_path = os.path.join(sys.real_prefix, 'lib64', 'python'+sys.version[:3])
        if os.path.exists(lib64_path):
            if _is_64bit:
                paths.insert(0, lib64_path)
            else:
                paths.append(lib64_path)
        # This is hardcoded in the Python executable, but relative to
        # sys.prefix. Debian change: we need to add the multiarch triplet
        # here, which is where the real stuff lives. As per PEP 421, in
        # Python 3.3+, this lives in sys.implementation, while in Python 2.7
        # it lives in sys.
        try:
            arch = getattr(sys, 'implementation', sys)._multiarch
        except AttributeError:
            # This is a non-multiarch aware Python. Fallback to the old way.
            arch = sys.platform
        plat_path = os.path.join(sys.real_prefix, 'lib',
                                 'python'+sys.version[:3],
                                 'plat-%s' % arch)
        if os.path.exists(plat_path):
            paths.append(plat_path)
    # This is hardcoded in the Python executable, but
    # relative to sys.prefix, so we have to fix up:
    for path in list(paths):
        tk_dir = os.path.join(path, 'lib-tk')
        if os.path.exists(tk_dir):
            paths.append(tk_dir)

    # These are hardcoded in the Apple's Python executable,
    # but relative to sys.prefix, so we have to fix them up:
    if sys.platform == 'darwin':
        hardcoded_paths = [os.path.join(relative_dir, module)
                           for relative_dir in hardcoded_relative_dirs
                           for module in ('plat-darwin', 'plat-mac', 'plat-mac/lib-scriptpackages')]

        for path in hardcoded_paths:
            if os.path.exists(path):
                paths.append(path)

    sys.path.extend(paths)
def force_global_eggs_after_local_site_packages():
    """
    Force easy_installed eggs in the global environment to get placed
    in sys.path after all packages inside the virtualenv. This
    maintains the "least surprise" result that packages in the
    virtualenv always mask global packages, never the other way
    around.
    """
    insert_at = getattr(sys, '__egginsert', 0)
    # Remember the last in-virtualenv entry past the current insertion
    # point; setuptools inserts global eggs after that index.
    for index, entry in enumerate(sys.path):
        if index > insert_at and entry.startswith(sys.prefix):
            insert_at = index
    sys.__egginsert = insert_at + 1
def virtual_addsitepackages(known_paths):
    # Add the *real* (pre-virtualenv) prefix's site-packages, with global
    # eggs forced to sort after the virtualenv's own packages.
    force_global_eggs_after_local_site_packages()
    return addsitepackages(known_paths, sys_prefix=sys.real_prefix)
def fixclasspath():
    """Adjust the special classpath sys.path entries for Jython. These
    entries should follow the base virtualenv lib directories.
    """
    normal = []
    special = []
    for entry in sys.path:
        if entry == '__classpath__' or entry.startswith('__pyclasspath__'):
            special.append(entry)
        else:
            normal.append(entry)
    # Real directories first, Jython's synthetic classpath markers last.
    sys.path = normal + special
def execusercustomize():
    """Run custom user specific code, if available."""
    try:
        import usercustomize
    except ImportError:
        # No usercustomize module installed; that is perfectly fine.
        pass
def main():
    """Bootstrap sys.path and the interactive built-ins.

    Order matters: the real prefix's stdlib must be installed before any
    site-packages scanning, and user-site handling depends on the
    no-global-site-packages marker written by virtualenv.
    """
    global ENABLE_USER_SITE
    virtual_install_main_packages()
    abs__file__()
    paths_in_sys = removeduppaths()
    # Running from a CPython build tree: add ./build/lib.<platform>.
    if (os.name == "posix" and sys.path and
        os.path.basename(sys.path[-1]) == "Modules"):
        addbuilddir()
    if _is_jython:
        fixclasspath()
    # Marker file written by "virtualenv --no-site-packages".
    GLOBAL_SITE_PACKAGES = not os.path.exists(os.path.join(os.path.dirname(__file__), 'no-global-site-packages.txt'))
    if not GLOBAL_SITE_PACKAGES:
        ENABLE_USER_SITE = False
    if ENABLE_USER_SITE is None:
        ENABLE_USER_SITE = check_enableusersite()
    paths_in_sys = addsitepackages(paths_in_sys)
    paths_in_sys = addusersitepackages(paths_in_sys)
    if GLOBAL_SITE_PACKAGES:
        paths_in_sys = virtual_addsitepackages(paths_in_sys)
    if sys.platform == 'os2emx':
        setBEGINLIBPATH()
    setquit()
    setcopyright()
    sethelper()
    aliasmbcs()
    setencoding()
    execsitecustomize()
    if ENABLE_USER_SITE:
        execusercustomize()
    # Remove sys.setdefaultencoding() so that users cannot change the
    # encoding after initialization. The test for presence is needed when
    # this module is run as a script, because this code is executed twice.
    if hasattr(sys, "setdefaultencoding"):
        del sys.setdefaultencoding
main()
def _script():
    """Command line interface: print path diagnostics or USER_BASE/USER_SITE.

    Exit status encodes the user-site state (see the help text below).
    """
    help = """\
    %s [--user-base] [--user-site]

    Without arguments print some useful information
    With arguments print the value of USER_BASE and/or USER_SITE separated
    by '%s'.

    Exit codes with --user-base or --user-site:
      0 - user site directory is enabled
      1 - user site directory is disabled by user
      2 - user site directory is disabled by super user
          or for security reasons
     >2 - unknown error
    """
    args = sys.argv[1:]
    if not args:
        print("sys.path = [")
        for dir in sys.path:
            print("    %r," % (dir,))
        print("]")
        def exists(path):
            if os.path.isdir(path):
                return "exists"
            else:
                return "doesn't exist"
        print("USER_BASE: %r (%s)" % (USER_BASE, exists(USER_BASE)))
        # BUGFIX: the original reported exists(USER_BASE) here, so the
        # USER_SITE line showed the wrong directory's existence.
        print("USER_SITE: %r (%s)" % (USER_SITE, exists(USER_SITE)))
        print("ENABLE_USER_SITE: %r" % ENABLE_USER_SITE)
        sys.exit(0)

    buffer = []
    if '--user-base' in args:
        buffer.append(USER_BASE)
    if '--user-site' in args:
        buffer.append(USER_SITE)

    if buffer:
        print(os.pathsep.join(buffer))
        if ENABLE_USER_SITE:
            sys.exit(0)
        elif ENABLE_USER_SITE is False:
            sys.exit(1)
        elif ENABLE_USER_SITE is None:
            sys.exit(2)
        else:
            sys.exit(3)
    else:
        import textwrap
        print(textwrap.dedent(help % (sys.argv[0], os.pathsep)))
        sys.exit(10)
if __name__ == '__main__':
_script()
| mit |
Lekensteyn/buildbot | master/buildbot/test/unit/test_data_workers.py | 9 | 9322 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
import mock
from twisted.internet import defer
from twisted.trial import unittest
from buildbot.data import workers
from buildbot.test.fake import fakedb
from buildbot.test.fake import fakemaster
from buildbot.test.util import endpoint
from buildbot.test.util import interfaces
# Shared DB fixture: builders 40/41 on masters 13/14; worker 1 ('linux') is
# configured on builder 40 for both masters and connected to master 13;
# worker 2 ('windows') is configured on every builder/master pair and
# connected to master 14.
testData = [
    fakedb.Builder(id=40, name=u'b1'),
    fakedb.Builder(id=41, name=u'b2'),
    fakedb.Master(id=13),
    fakedb.Master(id=14),
    fakedb.BuilderMaster(id=4013, builderid=40, masterid=13),
    fakedb.BuilderMaster(id=4014, builderid=40, masterid=14),
    fakedb.BuilderMaster(id=4113, builderid=41, masterid=13),

    fakedb.Worker(id=1, name=u'linux', info={}),
    fakedb.ConfiguredWorker(id=14013,
                            workerid=1, buildermasterid=4013),
    fakedb.ConfiguredWorker(id=14014,
                            workerid=1, buildermasterid=4014),
    fakedb.ConnectedWorker(id=113, masterid=13, workerid=1),

    fakedb.Worker(id=2, name=u'windows', info={"a": "b"}),
    fakedb.ConfiguredWorker(id=24013,
                            workerid=2, buildermasterid=4013),
    fakedb.ConfiguredWorker(id=24014,
                            workerid=2, buildermasterid=4014),
    fakedb.ConfiguredWorker(id=24113,
                            workerid=2, buildermasterid=4113),
    fakedb.ConnectedWorker(id=214, masterid=14, workerid=2),
]
def configuredOnKey(worker):
    """Stable sort key for configured_on entries: (masterid, builderid)."""
    masterid = worker.get('masterid', 0)
    builderid = worker.get('builderid', 0)
    return (masterid, builderid)
def _filt(bs, builderid, masterid):
    """Filter a worker dict's relations down to one master/builder, in place.

    A falsy builderid/masterid means "don't filter on that axis".  Returns
    the mutated dict for convenience.
    """
    def master_ok(entry):
        return not masterid or masterid == entry['masterid']

    def builder_ok(entry):
        return not builderid or builderid == entry['builderid']

    bs['connected_to'] = sorted(
        [d for d in bs['connected_to'] if master_ok(d)])
    bs['configured_on'] = sorted(
        [d for d in bs['configured_on'] if master_ok(d) and builder_ok(d)],
        key=configuredOnKey)
    return bs
def w1(builderid=None, masterid=None):
    """Expected data-API dict for worker 1 ('linux'), optionally filtered."""
    worker = {
        'workerid': 1,
        'name': 'linux',
        'workerinfo': {},
        'connected_to': [
            {'masterid': 13},
        ],
        'configured_on': sorted([
            {'builderid': 40, 'masterid': 13},
            {'builderid': 40, 'masterid': 14},
        ], key=configuredOnKey),
    }
    return _filt(worker, builderid, masterid)
def w2(builderid=None, masterid=None):
    """Expected data-API dict for worker 2 ('windows'), optionally filtered."""
    worker = {
        'workerid': 2,
        'name': 'windows',
        'workerinfo': {'a': 'b'},
        'connected_to': [
            {'masterid': 14},
        ],
        'configured_on': sorted([
            {'builderid': 40, 'masterid': 13},
            {'builderid': 41, 'masterid': 13},
            {'builderid': 40, 'masterid': 14},
        ], key=configuredOnKey),
    }
    return _filt(worker, builderid, masterid)
class WorkerEndpoint(endpoint.EndpointMixin, unittest.TestCase):
    # Exercises the single-worker endpoint (/workers/:id) and its
    # master-/builder-scoped variants.

    endpointClass = workers.WorkerEndpoint
    resourceTypeClass = workers.Worker

    def setUp(self):
        self.setUpEndpoint()
        return self.db.insertTestData(testData)

    def tearDown(self):
        self.tearDownEndpoint()

    def test_get_existing(self):
        # Lookup by numeric workerid.
        d = self.callGet(('workers', 2))

        @d.addCallback
        def check(worker):
            self.validateData(worker)
            # Sort for a stable comparison; ordering is not part of the API.
            worker['configured_on'] = sorted(
                worker['configured_on'], key=configuredOnKey)
            self.assertEqual(worker, w2())
        return d

    def test_get_existing_name(self):
        # Lookup by worker name.
        d = self.callGet(('workers', 'linux'))

        @d.addCallback
        def check(worker):
            self.validateData(worker)
            worker['configured_on'] = sorted(
                worker['configured_on'], key=configuredOnKey)
            self.assertEqual(worker, w1())
        return d

    def test_get_existing_masterid(self):
        # Scoping by master filters configured_on/connected_to.
        d = self.callGet(('masters', 14, 'workers', 2))

        @d.addCallback
        def check(worker):
            self.validateData(worker)
            worker['configured_on'] = sorted(
                worker['configured_on'], key=configuredOnKey)
            self.assertEqual(worker, w2(masterid=14))
        return d

    def test_get_existing_builderid(self):
        # Scoping by builder filters configured_on.
        d = self.callGet(('builders', 40, 'workers', 2))

        @d.addCallback
        def check(worker):
            self.validateData(worker)
            worker['configured_on'] = sorted(
                worker['configured_on'], key=configuredOnKey)
            self.assertEqual(worker, w2(builderid=40))
        return d

    def test_get_existing_masterid_builderid(self):
        # Scoping by both master and builder at once.
        d = self.callGet(('masters', 13, 'builders', 40, 'workers', 2))

        @d.addCallback
        def check(worker):
            self.validateData(worker)
            worker['configured_on'] = sorted(
                worker['configured_on'], key=configuredOnKey)
            self.assertEqual(worker, w2(masterid=13, builderid=40))
        return d

    def test_get_missing(self):
        # Unknown ids resolve to None rather than an error.
        d = self.callGet(('workers', 99))

        @d.addCallback
        def check(worker):
            self.assertEqual(worker, None)
        return d
class WorkersEndpoint(endpoint.EndpointMixin, unittest.TestCase):
    # Exercises the worker-collection endpoint (/workers) and its
    # master-/builder-scoped variants.

    endpointClass = workers.WorkersEndpoint
    resourceTypeClass = workers.Worker

    def setUp(self):
        self.setUpEndpoint()
        return self.db.insertTestData(testData)

    def tearDown(self):
        self.tearDownEndpoint()

    def test_get(self):
        d = self.callGet(('workers',))

        @d.addCallback
        def check(workers):
            for b in workers:
                self.validateData(b)
                # Sort for a stable comparison; ordering is not guaranteed.
                b['configured_on'] = sorted(b['configured_on'],
                                            key=configuredOnKey)
            self.assertEqual(sorted(workers, key=configuredOnKey),
                             sorted([w1(), w2()], key=configuredOnKey))
        return d

    def test_get_masterid(self):
        # String path components must be accepted for ids.
        d = self.callGet(('masters', '13', 'workers',))

        @d.addCallback
        def check(workers):
            [self.validateData(b) for b in workers]
            [sorted(b['configured_on'], key=configuredOnKey) for b in workers]
            self.assertEqual(sorted(workers, key=configuredOnKey),
                             sorted([w1(masterid=13), w2(masterid=13)], key=configuredOnKey))
        return d

    def test_get_builderid(self):
        # Only worker 2 is configured on builder 41.
        d = self.callGet(('builders', '41', 'workers',))

        @d.addCallback
        def check(workers):
            [self.validateData(b) for b in workers]
            [sorted(b['configured_on'], key=configuredOnKey) for b in workers]
            self.assertEqual(sorted(workers, key=configuredOnKey),
                             sorted([w2(builderid=41)], key=configuredOnKey))
        return d

    def test_get_masterid_builderid(self):
        # Combined master+builder scoping.
        d = self.callGet(('masters', '13', 'builders', '41', 'workers',))

        @d.addCallback
        def check(workers):
            [self.validateData(b) for b in workers]
            [sorted(b['configured_on'], key=configuredOnKey) for b in workers]
            self.assertEqual(sorted(workers, key=configuredOnKey),
                             sorted([w2(masterid=13, builderid=41)], key=configuredOnKey))
        return d
class Worker(interfaces.InterfaceTests, unittest.TestCase):
    # Verifies the Worker resource type's update methods: signatures must
    # match the fake data API, and findWorkerId passes through to the db.

    def setUp(self):
        self.master = fakemaster.make_master(testcase=self,
                                             wantMq=True, wantDb=True, wantData=True)
        self.rtype = workers.Worker(self.master)
        return self.master.db.insertTestData([
            fakedb.Master(id=13),
            fakedb.Master(id=14),
        ])

    def test_signature_findWorkerId(self):
        @self.assertArgSpecMatches(
            self.master.data.updates.findWorkerId,  # fake
            self.rtype.findWorkerId)  # real
        def findWorkerId(self, name):
            pass

    def test_signature_workerConfigured(self):
        @self.assertArgSpecMatches(
            self.master.data.updates.workerConfigured,  # fake
            self.rtype.workerConfigured)  # real
        def workerConfigured(self, workerid, masterid, builderids):
            pass

    def test_findWorkerId(self):
        # this just passes through to the db method, so test that
        rv = defer.succeed(None)
        self.master.db.workers.findWorkerId = \
            mock.Mock(return_value=rv)
        self.assertIdentical(self.rtype.findWorkerId(u'foo'), rv)

    def test_findWorkerId_not_id(self):
        # Non-identifier names must be rejected before reaching the db.
        self.assertRaises(ValueError, self.rtype.findWorkerId, b'foo')
        self.assertRaises(ValueError, self.rtype.findWorkerId, u'123/foo')
| gpl-2.0 |
Nihn/Diamond-1 | src/collectors/vmsfs/vmsfs.py | 69 | 3023 | # coding=utf-8
"""
Uses /sys/fs/vmsfs to collect host-global data on VMS memory usage
#### Dependencies
* /sys/fs/vmsfs <- vmsfs package, vmsfs mounted
"""
import diamond.collector
import os
class VMSFSCollector(diamond.collector.Collector):
    """Collects host-global VMS memory usage from /sys/fs/vmsfs."""

    SYSFS = '/sys/fs/vmsfs'

    # metric suffix -> (key in the stats file, scale factor).  File values
    # are page counts; 4096 converts to bytes assuming 4 KiB pages.
    VMSFS_STATS = {
        'resident': ('cur_resident', 4096),
        'allocated': ('cur_allocated', 4096)
    }

    def vmsfs_stats_read(self, filename):
        """Parse a vmsfs stats file into a {key: value} dict.

        Lines look like 'cur_resident: <pages>'; the trailing ':' is
        stripped from the key.  On any error the stats gathered so far are
        returned (best effort).  The file is always closed -- the original
        implementation leaked the descriptor on the success path.
        """
        stats = {}
        stats_fd = None
        try:
            stats_fd = open(filename)
            for line in stats_fd:
                tokens = line.split()
                # int() auto-promotes to long on Python 2, so this matches
                # the old long() call while staying Python 3 compatible.
                stats[tokens[0][0:-1]] = int(tokens[1])
        except Exception:
            # Best effort: unreadable or malformed files simply yield
            # whatever was parsed before the failure.
            pass
        finally:
            if stats_fd:
                stats_fd.close()
        return stats

    def vmsfs_stats_dispatch(self, filename, prefix=''):
        """Publish the known VMSFS_STATS metrics found in *filename*."""
        stats = self.vmsfs_stats_read(filename)
        for stat in self.VMSFS_STATS:
            name = self.VMSFS_STATS[stat][0]
            scale = self.VMSFS_STATS[stat][1]
            if name in stats:
                self.publish(prefix + name, stats[name] * scale)

    def get_default_config_help(self):
        """Return the collector's config help (no extra options)."""
        config_help = super(VMSFSCollector, self).get_default_config_help()
        config_help.update({
        })
        return config_help

    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(VMSFSCollector, self).get_default_config()
        config.update({
            'path': 'vmsfs'
        })
        return config

    def collect(self):
        # Bail out silently when vmsfs is not mounted or not readable.
        if not os.access(self.SYSFS, os.R_OK | os.X_OK):
            return None

        # Dispatch total stats.
        self.vmsfs_stats_dispatch(os.path.join(self.SYSFS, 'stats'))

        # Dispatch per-generation stats.
        # NOTE: We do not currently report the per-generation statistics to
        # diamond. This is because we do not have a good strategy for
        # aggregating generation data and exposing it in a sensible way. There
        # are three strategies:
        # 1) Collect everything at the host level.
        #    The problem here is that the number of metrics will explode for
        #    that individual host (and keep growing).
        # 2) Collect at the top-level (one virtual host per generation).
        #    Then the problem is finding the generation through UI tools, etc.
        # 3) Figure out some way to put the stats in each instance associated
        #    with that generation.
        # We favor (2) currently, but there's not much value in implementing it
        # until it can be exposed to the user.
        if False:
            TO_IGNORE = ('stats', 'version',
                         '00000000-0000-0000-0000-000000000000')
            files = os.listdir(self.SYSFS)
            for f in files:
                if f not in TO_IGNORE:
                    self.vmsfs_stats_dispatch('/sys/fs/vmsfs/' + f,
                                              prefix=('%s.' % f))
| mit |
openshift/openshift-tools | openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_utils/filter_plugins/openshift_aws_filters.py | 16 | 3743 | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Custom filters for use in openshift_aws
'''
from ansible import errors
class FilterModule(object):
    ''' Custom ansible filters for use by openshift_aws role'''

    @staticmethod
    def subnet_count_list(size, subnets):
        """Distribute `size` instances round-robin over the given subnets.

        Returns a dict mapping subnet_id -> number of instances placed
        in that subnet.
        """
        counts = {}
        total = len(subnets)
        for idx in range(int(size)):
            # Cycling with modulo is equivalent to the manual wrap-around
            # counter: instance idx lands in subnet idx % total.
            subnet_id = subnets[idx % total]['subnets'][0]['subnet_id']
            counts[subnet_id] = counts.get(subnet_id, 0) + 1
        return counts

    @staticmethod
    def ec2_to_asg_tag(ec2_tag_info):
        '''Convert an ec2 tag list into a list of ASG-style tag dicts.

        Only keys containing 'deployment_serial' are carried over; each
        one becomes its own {'tags': [...]} entry.
        '''
        asg_tags = []
        for tag in ec2_tag_info:
            for tag_key in tag:
                if 'deployment_serial' not in tag_key:
                    continue
                asg_tags.append({'tags': [{'key': 'deployment_serial',
                                           'value': tag[tag_key]}]})
        return asg_tags

    @staticmethod
    def scale_groups_serial(scale_group_info, upgrade=False):
        '''Return the deployment serial for a scale group.

        Looks up the deployment_serial tag on the first scale group.
        During an upgrade the serial is incremented before returning;
        an empty scale_group_info means a first deployment (serial 1).
        '''
        if scale_group_info == []:
            return 1

        scale_group_info = scale_group_info[0]

        if not isinstance(scale_group_info, dict):
            raise errors.AnsibleFilterError(
                "|filter plugin failed: Expected scale_group_info to be a dict")

        for tag in scale_group_info['tags']:
            if tag['key'] != 'deployment_serial':
                continue
            serial = int(tag['value'])
            return serial + 1 if upgrade else serial

        raise errors.AnsibleFilterError(
            "|filter plugin failed: deployment_serial tag was not found")

    @staticmethod
    def scale_groups_match_capacity(scale_group_info):
        '''Report whether every scale group's instance count equals its
        desired capacity.'''
        return all(group['desired_capacity'] == len(group['instances'])
                   for group in scale_group_info)

    @staticmethod
    def build_instance_tags(clusterid):
        '''Return the base tag dictionary for instances of the cluster.

        Includes the mandatory kubernetes.io/cluster/<clusterid> key,
        which is the reason this lives in a filter plugin at all.
        '''
        cluster_key = 'kubernetes.io/cluster/{}'.format(clusterid)
        return {'clusterid': clusterid, cluster_key: clusterid}

    def filters(self):
        ''' returns a mapping of filters to methods '''
        return {'build_instance_tags': self.build_instance_tags,
                'scale_groups_match_capacity': self.scale_groups_match_capacity,
                'scale_groups_serial': self.scale_groups_serial,
                'ec2_to_asg_tag': self.ec2_to_asg_tag,
                'subnet_count_list': self.subnet_count_list}
| apache-2.0 |
konstruktoid/ansible-upstream | lib/ansible/modules/network/nxos/nxos_ospf.py | 100 | 4148 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_ospf
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages configuration of an ospf instance.
description:
- Manages configuration of an ospf instance.
author: Gabriele Gerbino (@GGabriele)
options:
ospf:
description:
- Name of the ospf instance.
required: true
state:
description:
- Determines whether the config should be present or not
on the device.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- nxos_ospf:
ospf: 1
state: present
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["router ospf 1"]
'''
import re
from ansible.module_utils.network.nxos.nxos import get_config, load_config
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import CustomNetworkConfig
PARAM_TO_COMMAND_KEYMAP = {
'ospf': 'router ospf'
}
def get_value(config, module):
    """Extract every OSPF process name from the running configuration.

    `module` is accepted for signature parity with the other getters
    but is not used.  Returns a list of instance names, one per
    top-level 'router ospf <name>' line.
    """
    ospf_re = re.compile(r'^router ospf\s(?P<ospf>\S+).*', re.DOTALL)
    instances = []
    for line in config.splitlines():
        # Cheap substring filter before paying for the regex; also skips
        # lines like 'no router ospf 1' via the anchored match below.
        if 'router ospf' not in line:
            continue
        match = ospf_re.match(line)
        if match:
            instances.append(match.groupdict()['ospf'])
    return instances
def get_existing(module):
    """Return {'ospf': [instance names]} from the device config, or {}
    when no OSPF process is configured."""
    config = str(get_config(module))
    ospf_ids = get_value(config, module)
    return {'ospf': ospf_ids} if ospf_ids else {}
def state_present(module, proposed, candidate):
    """Queue the command that creates the proposed OSPF process."""
    cmd = 'router ospf {0}'.format(proposed['ospf'])
    candidate.add([cmd], parents=[])
def state_absent(module, proposed, candidate):
    """Queue the command that removes the proposed OSPF process."""
    cmd = 'no router ospf {0}'.format(proposed['ospf'])
    candidate.add([cmd], parents=[])
def main():
    # Module entry point: declare the argument spec, compare the desired
    # state against the OSPF processes already on the device, and push
    # only the commands needed to converge.
    argument_spec = dict(
        ospf=dict(required=True, type='str'),
        state=dict(choices=['present', 'absent'], default='present', required=False)
    )
    argument_spec.update(nxos_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    warnings = list()
    check_args(module, warnings)
    result = dict(changed=False, warnings=warnings)
    state = module.params['state']
    ospf = str(module.params['ospf'])
    existing = get_existing(module)
    proposed = dict(ospf=ospf)
    if not existing:
        existing_list = []
    else:
        existing_list = existing['ospf']
    candidate = CustomNetworkConfig(indent=3)
    # Only generate commands when the desired process is missing (present)
    # or still configured (absent); otherwise the run is a no-op.
    if state == 'present' and ospf not in existing_list:
        state_present(module, proposed, candidate)
    if state == 'absent' and ospf in existing_list:
        state_absent(module, proposed, candidate)
    if candidate:
        candidate = candidate.items_text()
        load_config(module, candidate)
        result['changed'] = True
        result['commands'] = candidate
    else:
        result['commands'] = []
    module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
djgagne/scikit-learn | sklearn/feature_extraction/stop_words.py | 290 | 3252 | # This list of English stop words is taken from the "Glasgow Information
# Retrieval Group". The original list can be found at
# http://ir.dcs.gla.ac.uk/resources/linguistic_utils/stop_words
ENGLISH_STOP_WORDS = frozenset([
"a", "about", "above", "across", "after", "afterwards", "again", "against",
"all", "almost", "alone", "along", "already", "also", "although", "always",
"am", "among", "amongst", "amoungst", "amount", "an", "and", "another",
"any", "anyhow", "anyone", "anything", "anyway", "anywhere", "are",
"around", "as", "at", "back", "be", "became", "because", "become",
"becomes", "becoming", "been", "before", "beforehand", "behind", "being",
"below", "beside", "besides", "between", "beyond", "bill", "both",
"bottom", "but", "by", "call", "can", "cannot", "cant", "co", "con",
"could", "couldnt", "cry", "de", "describe", "detail", "do", "done",
"down", "due", "during", "each", "eg", "eight", "either", "eleven", "else",
"elsewhere", "empty", "enough", "etc", "even", "ever", "every", "everyone",
"everything", "everywhere", "except", "few", "fifteen", "fify", "fill",
"find", "fire", "first", "five", "for", "former", "formerly", "forty",
"found", "four", "from", "front", "full", "further", "get", "give", "go",
"had", "has", "hasnt", "have", "he", "hence", "her", "here", "hereafter",
"hereby", "herein", "hereupon", "hers", "herself", "him", "himself", "his",
"how", "however", "hundred", "i", "ie", "if", "in", "inc", "indeed",
"interest", "into", "is", "it", "its", "itself", "keep", "last", "latter",
"latterly", "least", "less", "ltd", "made", "many", "may", "me",
"meanwhile", "might", "mill", "mine", "more", "moreover", "most", "mostly",
"move", "much", "must", "my", "myself", "name", "namely", "neither",
"never", "nevertheless", "next", "nine", "no", "nobody", "none", "noone",
"nor", "not", "nothing", "now", "nowhere", "of", "off", "often", "on",
"once", "one", "only", "onto", "or", "other", "others", "otherwise", "our",
"ours", "ourselves", "out", "over", "own", "part", "per", "perhaps",
"please", "put", "rather", "re", "same", "see", "seem", "seemed",
"seeming", "seems", "serious", "several", "she", "should", "show", "side",
"since", "sincere", "six", "sixty", "so", "some", "somehow", "someone",
"something", "sometime", "sometimes", "somewhere", "still", "such",
"system", "take", "ten", "than", "that", "the", "their", "them",
"themselves", "then", "thence", "there", "thereafter", "thereby",
"therefore", "therein", "thereupon", "these", "they", "thick", "thin",
"third", "this", "those", "though", "three", "through", "throughout",
"thru", "thus", "to", "together", "too", "top", "toward", "towards",
"twelve", "twenty", "two", "un", "under", "until", "up", "upon", "us",
"very", "via", "was", "we", "well", "were", "what", "whatever", "when",
"whence", "whenever", "where", "whereafter", "whereas", "whereby",
"wherein", "whereupon", "wherever", "whether", "which", "while", "whither",
"who", "whoever", "whole", "whom", "whose", "why", "will", "with",
"within", "without", "would", "yet", "you", "your", "yours", "yourself",
"yourselves"])
| bsd-3-clause |
shakalaca/ASUS_ZenFone_ZE601KL | kernel/tools/perf/scripts/python/event_analyzing_sample.py | 4719 | 7393 | # event_analyzing_sample.py: general event handler in python
#
# Current perf report is already very powerful with the annotation integrated,
# and this script is not trying to be as powerful as perf report, but
# providing end user/developer a flexible way to analyze the events other
# than trace points.
#
# The 2 database related functions in this script just show how to gather
# the basic information, and users can modify and write their own functions
# according to their specific requirement.
#
# The first function "show_general_events" just does a basic grouping for all
# generic events with the help of sqlite, and the 2nd one "show_pebs_ll" is
# for a x86 HW PMU event: PEBS with load latency data.
#
import os
import sys
import math
import struct
import sqlite3
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from EventClass import *
#
# If the perf.data has a big number of samples, then the insert operation
# will be very time consuming (about 10+ minutes for 10000 samples) if the
# .db database is on disk. Move the .db file to RAM based FS to speedup
# the handling, which will cut the time down to several seconds.
#
con = sqlite3.connect("/dev/shm/perf.db")
con.isolation_level = None
def trace_begin():
    # Called once by perf before any sample is processed.
    print "In trace_begin:\n"
    #
    # Will create several tables at the start, pebs_ll is for PEBS data with
    # load latency info, while gen_events is for general event.
    #
    con.execute("""
        create table if not exists gen_events (
            name text,
            symbol text,
            comm text,
            dso text
        );""")
    con.execute("""
        create table if not exists pebs_ll (
            name text,
            symbol text,
            comm text,
            dso text,
            flags integer,
            ip integer,
            status integer,
            dse integer,
            dla integer,
            lat integer
        );""")
#
# Create and insert event object to a database so that user could
# do more analysis with simple database commands.
#
def process_event(param_dict):
    # Per-sample perf callback: pull the fields out of the dict perf
    # hands us, build an event object and insert it into the database.
    event_attr = param_dict["attr"]
    sample = param_dict["sample"]
    raw_buf = param_dict["raw_buf"]
    comm = param_dict["comm"]
    name = param_dict["ev_name"]

    # Symbol and dso info are not always resolved
    if (param_dict.has_key("dso")):
        dso = param_dict["dso"]
    else:
        dso = "Unknown_dso"
    if (param_dict.has_key("symbol")):
        symbol = param_dict["symbol"]
    else:
        symbol = "Unknown_symbol"

    # Create the event object and insert it to the right table in database
    event = create_event(name, comm, dso, symbol, raw_buf)
    insert_db(event)
def insert_db(event):
    # Route the event into the table matching its type.  For PEBS events
    # the top bit of ip/dla is masked off so the values stay non-negative.
    if event.ev_type == EVTYPE_GENERIC:
        con.execute("insert into gen_events values(?, ?, ?, ?)",
                    (event.name, event.symbol, event.comm, event.dso))
    elif event.ev_type == EVTYPE_PEBS_LL:
        event.ip &= 0x7fffffffffffffff
        event.dla &= 0x7fffffffffffffff
        con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
                    (event.name, event.symbol, event.comm, event.dso, event.flags,
                     event.ip, event.status, event.dse, event.dla, event.lat))
def trace_end():
    # Called once by perf after the last sample: print the summaries and
    # release the database connection.
    print "In trace_end:\n"
    # We show the basic info for the 2 type of event classes
    show_general_events()
    show_pebs_ll()
    con.close()
#
# As the event number may be very big, so we can't use linear way
# to show the histogram in real number, but use a log2 algorithm.
#
def num2sym(num):
    # Render a logarithmic histogram bar: one '#' per power of two,
    # with a minimum of a single '#' (num is assumed >= 1).
    width = int(math.log(num, 2)) + 1
    return '#' * width
def show_general_events():
    # Summarize the gen_events table: record counts grouped by thread,
    # symbol and dso, each rendered as a log2 '#' histogram.
    # Check the total record number in the table
    count = con.execute("select count(*) from gen_events")
    for t in count:
        print "There is %d records in gen_events table" % t[0]
        if t[0] == 0:
            return

    print "Statistics about the general events grouped by thread/symbol/dso: \n"

    # Group by thread
    commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
    print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
    for row in commq:
        print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))

    # Group by symbol
    print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
    symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
    for row in symbolq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))

    # Group by dso
    print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)
    dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
    for row in dsoq:
        print "%40s %8d %s" % (row[0], row[1], num2sym(row[1]))
#
# This function just shows the basic info, and we could do more with the
# data in the tables, like checking the function parameters when some
# big latency events happen.
#
def show_pebs_ll():
    # Summarize the pebs_ll table: record counts grouped by thread,
    # symbol, data-source encoding (dse) and load latency.
    count = con.execute("select count(*) from pebs_ll")
    for t in count:
        print "There is %d records in pebs_ll table" % t[0]
        if t[0] == 0:
            return

    print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n"

    # Group by thread
    commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
    print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
    for row in commq:
        print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))

    # Group by symbol
    print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
    symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
    for row in symbolq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))

    # Group by dse
    dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
    print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)
    for row in dseq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))

    # Group by latency, in ascending latency order (unlike the
    # count-ordered groupings above).
    latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
    print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)
    for row in latq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
def trace_unhandled(event_name, context, event_fields_dict):
    # Fallback handler: dump any event without a dedicated handler as
    # sorted key=value pairs on one line.
    print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
| gpl-2.0 |
2013Commons/HUE-SHARK | desktop/core/ext-py/Django-1.2.3/build/lib.linux-i686-2.7/django/contrib/gis/tests/layermap/models.py | 161 | 2179 | from django.contrib.gis.db import models
class State(models.Model):
    # Simple named state record; target of the County.state foreign key.
    name = models.CharField(max_length=20)
    objects = models.GeoManager()
class County(models.Model):
    name = models.CharField(max_length=25)
    state = models.ForeignKey(State)
    mpoly = models.MultiPolygonField(srid=4269)  # Multipolygon in NAD83
    objects = models.GeoManager()
class CountyFeat(models.Model):
    # Like County but stores each feature as a single POLYGON (NAD83).
    name = models.CharField(max_length=25)
    poly = models.PolygonField(srid=4269)
    objects = models.GeoManager()
class City(models.Model):
    # City with point geometry plus scalar attributes of several field
    # types (int, decimal, date) for exercising field mapping.
    name = models.CharField(max_length=25)
    population = models.IntegerField()
    density = models.DecimalField(max_digits=7, decimal_places=1)
    dt = models.DateField()
    point = models.PointField()
    objects = models.GeoManager()
class Interstate(models.Model):
    # Linear feature with a LINESTRING geometry.
    name = models.CharField(max_length=20)
    length = models.DecimalField(max_digits=6, decimal_places=2)
    path = models.LineStringField()
    objects = models.GeoManager()
# Same as `City` above, but for testing model inheritance.
# Same as `City` above, but for testing model inheritance.
class CityBase(models.Model):
    name = models.CharField(max_length=25)
    population = models.IntegerField()
    density = models.DecimalField(max_digits=7, decimal_places=1)
    point = models.PointField()
    objects = models.GeoManager()
class ICity1(CityBase):
    # First level of inheritance: adds a date field to CityBase.
    dt = models.DateField()
class ICity2(ICity1):
    # Second level of inheritance: adds an auto-updated timestamp.
    dt_time = models.DateTimeField(auto_now=True)
# Mapping dictionaries for the models above: model field name -> source
# feature/attribute name (presumably consumed by the LayerMapping tests;
# confirm against the test module).
co_mapping = {'name' : 'Name',
              'state' : {'name' : 'State'}, # ForeignKey's use another mapping dictionary for the _related_ Model (State in this case).
              'mpoly' : 'MULTIPOLYGON', # Will convert POLYGON features into MULTIPOLYGONS.
              }

cofeat_mapping = {'name' : 'Name',
                  'poly' : 'POLYGON',
                  }

city_mapping = {'name' : 'Name',
                'population' : 'Population',
                'density' : 'Density',
                'dt' : 'Created',
                'point' : 'POINT',
                }

inter_mapping = {'name' : 'Name',
                 'length' : 'Length',
                 'path' : 'LINESTRING',
                 }
| apache-2.0 |
soldag/home-assistant | homeassistant/components/cloud/__init__.py | 9 | 7067 | """Component to integrate the Home Assistant cloud."""
from hass_nabucasa import Cloud
import voluptuous as vol
from homeassistant.components.alexa import const as alexa_const
from homeassistant.components.google_assistant import const as ga_c
from homeassistant.const import (
CONF_MODE,
CONF_NAME,
CONF_REGION,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv, entityfilter
from homeassistant.loader import bind_hass
from homeassistant.util.aiohttp import MockRequest
from . import account_link, http_api
from .client import CloudClient
from .const import (
CONF_ACCOUNT_LINK_URL,
CONF_ACME_DIRECTORY_SERVER,
CONF_ALEXA,
CONF_ALEXA_ACCESS_TOKEN_URL,
CONF_ALIASES,
CONF_CLOUDHOOK_CREATE_URL,
CONF_COGNITO_CLIENT_ID,
CONF_ENTITY_CONFIG,
CONF_FILTER,
CONF_GOOGLE_ACTIONS,
CONF_GOOGLE_ACTIONS_REPORT_STATE_URL,
CONF_RELAYER,
CONF_REMOTE_API_URL,
CONF_SUBSCRIPTION_INFO_URL,
CONF_USER_POOL_ID,
CONF_VOICE_API_URL,
DOMAIN,
MODE_DEV,
MODE_PROD,
)
from .prefs import CloudPreferences
DEFAULT_MODE = MODE_PROD
SERVICE_REMOTE_CONNECT = "remote_connect"
SERVICE_REMOTE_DISCONNECT = "remote_disconnect"
ALEXA_ENTITY_SCHEMA = vol.Schema(
{
vol.Optional(alexa_const.CONF_DESCRIPTION): cv.string,
vol.Optional(alexa_const.CONF_DISPLAY_CATEGORIES): cv.string,
vol.Optional(CONF_NAME): cv.string,
}
)
GOOGLE_ENTITY_SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_ALIASES): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ga_c.CONF_ROOM_HINT): cv.string,
}
)
ASSISTANT_SCHEMA = vol.Schema(
{vol.Optional(CONF_FILTER, default=dict): entityfilter.FILTER_SCHEMA}
)
ALEXA_SCHEMA = ASSISTANT_SCHEMA.extend(
{vol.Optional(CONF_ENTITY_CONFIG): {cv.entity_id: ALEXA_ENTITY_SCHEMA}}
)
GACTIONS_SCHEMA = ASSISTANT_SCHEMA.extend(
{vol.Optional(CONF_ENTITY_CONFIG): {cv.entity_id: GOOGLE_ENTITY_SCHEMA}}
)
# pylint: disable=no-value-for-parameter
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_MODE, default=DEFAULT_MODE): vol.In(
[MODE_DEV, MODE_PROD]
),
vol.Optional(CONF_COGNITO_CLIENT_ID): str,
vol.Optional(CONF_USER_POOL_ID): str,
vol.Optional(CONF_REGION): str,
vol.Optional(CONF_RELAYER): str,
vol.Optional(CONF_SUBSCRIPTION_INFO_URL): vol.Url(),
vol.Optional(CONF_CLOUDHOOK_CREATE_URL): vol.Url(),
vol.Optional(CONF_REMOTE_API_URL): vol.Url(),
vol.Optional(CONF_ACME_DIRECTORY_SERVER): vol.Url(),
vol.Optional(CONF_ALEXA): ALEXA_SCHEMA,
vol.Optional(CONF_GOOGLE_ACTIONS): GACTIONS_SCHEMA,
vol.Optional(CONF_ALEXA_ACCESS_TOKEN_URL): vol.Url(),
vol.Optional(CONF_GOOGLE_ACTIONS_REPORT_STATE_URL): vol.Url(),
vol.Optional(CONF_ACCOUNT_LINK_URL): vol.Url(),
vol.Optional(CONF_VOICE_API_URL): vol.Url(),
}
)
},
extra=vol.ALLOW_EXTRA,
)
# Raised by the helper functions in this module when the user is not
# logged in or the required cloud feature is disabled.
class CloudNotAvailable(HomeAssistantError):
    """Raised when an action requires the cloud but it's not available."""
@bind_hass
@callback
def async_is_logged_in(hass) -> bool:
    """Test if user is logged in."""
    # True only when the cloud integration has been set up AND the
    # account is authenticated.
    return DOMAIN in hass.data and hass.data[DOMAIN].is_logged_in
@bind_hass
@callback
def async_active_subscription(hass) -> bool:
    """Test if user has an active subscription."""
    # Logged-in is a precondition; an expired subscription returns False.
    return async_is_logged_in(hass) and not hass.data[DOMAIN].subscription_expired
@bind_hass
async def async_create_cloudhook(hass, webhook_id: str) -> str:
    """Create a cloudhook and return its public cloudhook URL.

    Raises CloudNotAvailable when the user is not logged in.
    """
    if not async_is_logged_in(hass):
        raise CloudNotAvailable

    # NOTE(review): second argument to async_create is a bare True flag —
    # confirm its meaning against hass_nabucasa's cloudhooks API.
    hook = await hass.data[DOMAIN].cloudhooks.async_create(webhook_id, True)
    return hook["cloudhook_url"]
@bind_hass
async def async_delete_cloudhook(hass, webhook_id: str) -> None:
    """Delete a cloudhook.

    Raises CloudNotAvailable when the cloud integration is not set up.
    """
    # NOTE(review): unlike async_create_cloudhook this only checks that the
    # integration is loaded, not that the user is logged in.
    if DOMAIN not in hass.data:
        raise CloudNotAvailable

    await hass.data[DOMAIN].cloudhooks.async_delete(webhook_id)
@bind_hass
@callback
def async_remote_ui_url(hass) -> str:
    """Get the remote UI URL.

    Raises CloudNotAvailable unless the user is logged in, remote access
    is enabled in the preferences, and an instance domain is known.
    """
    if not async_is_logged_in(hass):
        raise CloudNotAvailable

    if not hass.data[DOMAIN].client.prefs.remote_enabled:
        raise CloudNotAvailable

    if not hass.data[DOMAIN].remote.instance_domain:
        raise CloudNotAvailable

    return f"https://{hass.data[DOMAIN].remote.instance_domain}"
def is_cloudhook_request(request):
    """Test if a request came from a cloudhook.

    Async friendly.
    """
    # Requests arriving via the cloud are represented as MockRequest
    # objects (see homeassistant.util.aiohttp) rather than real aiohttp
    # requests.
    return isinstance(request, MockRequest)
async def async_setup(hass, config):
    """Initialize the Home Assistant cloud."""
    # Process configs: fall back to production mode when the user has no
    # explicit cloud: section in configuration.yaml.
    if DOMAIN in config:
        kwargs = dict(config[DOMAIN])
    else:
        kwargs = {CONF_MODE: DEFAULT_MODE}

    # Alexa/Google custom config (popped so they are not forwarded to Cloud).
    alexa_conf = kwargs.pop(CONF_ALEXA, None) or ALEXA_SCHEMA({})
    google_conf = kwargs.pop(CONF_GOOGLE_ACTIONS, None) or GACTIONS_SCHEMA({})

    # Cloud settings
    prefs = CloudPreferences(hass)
    await prefs.async_initialize()

    # Initialize Cloud
    websession = hass.helpers.aiohttp_client.async_get_clientsession()
    client = CloudClient(hass, prefs, websession, alexa_conf, google_conf)
    cloud = hass.data[DOMAIN] = Cloud(client, **kwargs)

    async def _shutdown(event):
        """Shutdown event."""
        await cloud.stop()

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _shutdown)

    async def _service_handler(service):
        """Handle the remote_connect/remote_disconnect services.

        Also persists the new remote_enabled value in the preferences.
        """
        if service.service == SERVICE_REMOTE_CONNECT:
            await cloud.remote.connect()
            await prefs.async_update(remote_enabled=True)
        elif service.service == SERVICE_REMOTE_DISCONNECT:
            await cloud.remote.disconnect()
            await prefs.async_update(remote_enabled=False)

    hass.helpers.service.async_register_admin_service(
        DOMAIN, SERVICE_REMOTE_CONNECT, _service_handler
    )
    hass.helpers.service.async_register_admin_service(
        DOMAIN, SERVICE_REMOTE_DISCONNECT, _service_handler
    )

    loaded = False

    async def _on_connect():
        """Discover RemoteUI binary sensor."""
        nonlocal loaded

        # Prevent multiple discovery: the IoT connection can reconnect,
        # but the platforms must only be loaded once.
        if loaded:
            return
        loaded = True

        await hass.helpers.discovery.async_load_platform(
            "binary_sensor", DOMAIN, {}, config
        )
        await hass.helpers.discovery.async_load_platform("stt", DOMAIN, {}, config)
        await hass.helpers.discovery.async_load_platform("tts", DOMAIN, {}, config)

    cloud.iot.register_on_connect(_on_connect)

    await cloud.start()
    await http_api.async_setup(hass)

    account_link.async_setup(hass)

    return True
| apache-2.0 |
duki994/SM-G850_Kernel_LP | android-toolchain/share/gdb/python/gdb/command/prompt.py | 68 | 2079 | # Extended prompt.
# Copyright (C) 2011-2015 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""GDB command for working with extended prompts."""
import gdb
import gdb.prompt
class _ExtendedPrompt(gdb.Parameter):

    """Set the extended prompt.

    Usage: set extended-prompt VALUE

    Substitutions are applied to VALUE to compute the real prompt.

    The currently defined substitutions are:

    """
    # Add the prompt library's dynamically generated help to the
    # __doc__ string.
    __doc__ = __doc__ + gdb.prompt.prompt_help()

    set_doc = "Set the extended prompt."
    show_doc = "Show the extended prompt."

    def __init__(self):
        super(_ExtendedPrompt, self).__init__("extended-prompt",
                                              gdb.COMMAND_SUPPORT,
                                              gdb.PARAM_STRING_NOESCAPE)
        # self.value holds the raw (unsubstituted) prompt template.
        self.value = ''
        self.hook_set = False

    def get_show_string(self, pvalue):
        # Fixed: the original used "is not ''", an identity comparison
        # that only worked through CPython string interning (and raises
        # SyntaxWarning on Python 3.8+).  Compare by equality instead.
        if self.value != '':
            return "The extended prompt is: " + self.value
        else:
            return "The extended prompt is not set."

    def get_set_string(self):
        # Install the prompt hook lazily, the first time the parameter
        # is actually set.
        if not self.hook_set:
            gdb.prompt_hook = self.before_prompt_hook
            self.hook_set = True
        return ""

    def before_prompt_hook(self, current):
        # Returning None tells gdb to keep the current prompt.
        if self.value != '':
            return gdb.prompt.substitute_prompt(self.value)
        else:
            return None
_ExtendedPrompt()
| gpl-2.0 |
midonet/python-neutron-plugin-midonet | midonet/neutron/extensions/system.py | 2 | 3293 | # Copyright (C) 2014 Midokura SARL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
from neutron.api import extensions
from neutron.api.v2 import base
from neutron import manager
SYSTEM = 'system'
SYSTEMS = '%ss' % SYSTEM
RESOURCE_ATTRIBUTE_MAP = {
SYSTEMS: {
'state': {'allow_post': False, 'allow_put': True,
'validate': {'type:values': ['UPGRADE', 'ACTIVE']},
'is_visible': True, 'required_by_policy': True},
'availability': {'allow_post': False, 'allow_put': True,
'validate': {
'type:values': [
'READONLY',
'READWRITE'
]
},
'is_visible': True, 'default': 'READWRITE',
'required_by_policy': True},
'write_version': {'allow_post': False, 'allow_put': True,
'validate': {'type:regex': '^(\d+\.\d+)$'},
'is_visible': True, 'required_by_policy': True}
}
}
class System(object):
    """API extension descriptor for Midonet system maintenance control."""

    @classmethod
    def get_name(cls):
        return "Midonet System Control"

    @classmethod
    def get_alias(cls):
        return "system"

    @classmethod
    def get_description(cls):
        return ("Control the system for maintenance and upgrades")

    @classmethod
    def get_namespace(cls):
        return "http://docs.openstack.org/ext/system/api/v1.0"

    @classmethod
    def get_updated(cls):
        return "2014-07-20T10:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""
        plugin = manager.NeutronManager.get_plugin()

        # Build the single /systems resource backed by the core plugin.
        resource_name = SYSTEM
        collection_name = SYSTEMS
        params = RESOURCE_ATTRIBUTE_MAP.get(collection_name, dict())
        controller = base.create_resource(
            collection_name, resource_name, plugin, params)
        ex = extensions.ResourceExtension(collection_name, controller)
        return [ex]

    def update_attributes_map(self, attributes):
        """Merge externally-extended attributes into this extension's map."""
        # dict.items() instead of the Python-2-only iteritems() so the
        # extension also loads under Python 3; behavior is identical.
        for resource_map, attrs in RESOURCE_ATTRIBUTE_MAP.items():
            extended_attrs = attributes.get(resource_map)
            if extended_attrs:
                attrs.update(extended_attrs)

    def get_extended_resources(self, version):
        """Return this extension's attribute map for API v2.0, else {}."""
        if version == "2.0":
            return RESOURCE_ATTRIBUTE_MAP
        else:
            return {}
# Abstract interface a plugin must implement to back the system extension.
@six.add_metaclass(abc.ABCMeta)
class SystemPluginBase(object):

    @abc.abstractmethod
    def get_system(self, context, id, fields=None):
        # Return the system resource identified by `id`.
        pass

    @abc.abstractmethod
    def update_system(self, context, id, system):
        # Apply the given update to the system resource identified by `id`.
        pass
| apache-2.0 |
gsnbng/erpnext | erpnext/accounts/doctype/bank_guarantee/bank_guarantee.py | 2 | 1173 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, json
from frappe.model.document import Document
from frappe import _
from frappe.desk.search import sanitize_searchfield
class BankGuarantee(Document):
def validate(self):
if not (self.customer or self.supplier):
frappe.throw(_("Select the customer or supplier."))
def on_submit(self):
if not self.bank_guarantee_number:
frappe.throw(_("Enter the Bank Guarantee Number before submittting."))
if not self.name_of_beneficiary:
frappe.throw(_("Enter the name of the Beneficiary before submittting."))
if not self.bank:
frappe.throw(_("Enter the name of the bank or lending institution before submittting."))
@frappe.whitelist()
def get_vouchar_detials(column_list, doctype, docname):
column_list = json.loads(column_list)
for col in column_list:
sanitize_searchfield(col)
return frappe.db.sql(''' select {columns} from `tab{doctype}` where name=%s'''
.format(columns=", ".join(json.loads(column_list)), doctype=doctype), docname, as_dict=1)[0]
| agpl-3.0 |
p4datasystems/CarnotKEdist | dist/Lib/unittest/case.py | 28 | 42629 | """Test case implementation"""
import collections
import sys
import functools
import difflib
import pprint
import re
import types
import warnings
from . import result
from .util import (
strclass, safe_repr, unorderable_list_difference,
_count_diff_all_purpose, _count_diff_hashable
)
__unittest = True
DIFF_OMITTED = ('\nDiff is %s characters long. '
'Set self.maxDiff to None to see it.')
class SkipTest(Exception):
    """
    Raise this exception in a test to skip it.

    Usually you can use TestResult.skip() or one of the skipping decorators
    instead of raising this directly.
    """
    # Caught by the test machinery, which reports the test as skipped
    # rather than failed.
    pass
class _ExpectedFailure(Exception):
    """
    Raise this when a test is expected to fail.

    This is an implementation detail.
    """

    def __init__(self, exc_info):
        super(_ExpectedFailure, self).__init__()
        # exc_info: the (type, value, traceback) triple captured where the
        # expected failure occurred.
        self.exc_info = exc_info
class _UnexpectedSuccess(Exception):
"""
The test was supposed to fail, but it didn't!
"""
pass
def _id(obj):
return obj
def skip(reason):
    """
    Unconditionally skip a test.
    """
    def decorator(test_item):
        # Plain callables are replaced by a wrapper that raises SkipTest;
        # classes (types.ClassType covers old-style classes — Python 2
        # only) are left intact and merely flagged via the attributes below.
        if not isinstance(test_item, (type, types.ClassType)):
            @functools.wraps(test_item)
            def skip_wrapper(*args, **kwargs):
                raise SkipTest(reason)
            test_item = skip_wrapper
        # TestCase.run() inspects these attributes instead of calling the test.
        test_item.__unittest_skip__ = True
        test_item.__unittest_skip_why__ = reason
        return test_item
    return decorator
def skipIf(condition, reason):
    """Skip the decorated test when *condition* is true; otherwise
    leave the test untouched (identity decorator)."""
    return skip(reason) if condition else _id
def skipUnless(condition, reason):
    """Skip the decorated test unless *condition* is true."""
    return _id if condition else skip(reason)
def expectedFailure(func):
    # Decorator: wrap *func* so that an exception raised by it becomes
    # _ExpectedFailure (carrying the original exc_info) and a clean return
    # becomes _UnexpectedSuccess. TestCase.run() translates both markers
    # into the corresponding TestResult entries.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            func(*args, **kwargs)
        except Exception:
            # Re-raise from inside the handler so exc_info is still live.
            raise _ExpectedFailure(sys.exc_info())
        raise _UnexpectedSuccess
    return wrapper
class _AssertRaisesContext(object):
    """A context manager used to implement TestCase.assertRaises* methods."""

    def __init__(self, expected, test_case, expected_regexp=None):
        # expected: exception class (or tuple of classes) that must be raised.
        # expected_regexp: optional pattern (string or compiled) matched
        # against str(exception) on exit.
        self.expected = expected
        self.failureException = test_case.failureException
        self.expected_regexp = expected_regexp

    def __enter__(self):
        # Return self so callers can later read the caught exception via
        # the `exception` attribute.
        return self

    def __exit__(self, exc_type, exc_value, tb):
        if exc_type is None:
            # No exception raised inside the with-block: assertion fails.
            try:
                exc_name = self.expected.__name__
            except AttributeError:
                exc_name = str(self.expected)
            raise self.failureException(
                "{0} not raised".format(exc_name))
        if not issubclass(exc_type, self.expected):
            # let unexpected exceptions pass through
            return False
        self.exception = exc_value # store for later retrieval
        if self.expected_regexp is None:
            # Returning True suppresses the expected exception.
            return True
        expected_regexp = self.expected_regexp
        if isinstance(expected_regexp, basestring):  # Python 2: str or unicode
            expected_regexp = re.compile(expected_regexp)
        if not expected_regexp.search(str(exc_value)):
            raise self.failureException('"%s" does not match "%s"' %
                (expected_regexp.pattern, str(exc_value)))
        return True
class TestCase(object):
"""A class whose instances are single test cases.
By default, the test code itself should be placed in a method named
'runTest'.
If the fixture may be used for many test cases, create as
many test methods as are needed. When instantiating such a TestCase
subclass, specify in the constructor arguments the name of the test method
that the instance is to execute.
Test authors should subclass TestCase for their own tests. Construction
and deconstruction of the test's environment ('fixture') can be
implemented by overriding the 'setUp' and 'tearDown' methods respectively.
If it is necessary to override the __init__ method, the base class
__init__ method must always be called. It is important that subclasses
should not change the signature of their __init__ method, since instances
of the classes are instantiated automatically by parts of the framework
in order to be run.
"""
# This attribute determines which exception will be raised when
# the instance's assertion methods fail; test methods raising this
# exception will be deemed to have 'failed' rather than 'errored'
failureException = AssertionError
# This attribute determines whether long messages (including repr of
# objects used in assert methods) will be printed on failure in *addition*
# to any explicit message passed.
longMessage = False
# This attribute sets the maximum length of a diff in failure messages
# by assert methods using difflib. It is looked up as an instance attribute
# so can be configured by individual tests if required.
maxDiff = 80*8
# If a string is longer than _diffThreshold, use normal comparison instead
# of difflib. See #11763.
_diffThreshold = 2**16
# Attribute used by TestSuite for classSetUp
_classSetupFailed = False
    def __init__(self, methodName='runTest'):
        """Create an instance of the class that will use the named test
        method when executed. Raises a ValueError if the instance does
        not have a method with the specified name.
        """
        self._testMethodName = methodName
        self._resultForDoCleanups = None  # set by run(); read by doCleanups()
        try:
            testMethod = getattr(self, methodName)
        except AttributeError:
            raise ValueError("no such test method in %s: %s" %
                  (self.__class__, methodName))
        self._testMethodDoc = testMethod.__doc__
        self._cleanups = []  # LIFO stack populated by addCleanup()

        # Map types to custom assertEqual functions that will compare
        # instances of said type in more detail to generate a more useful
        # error message.
        self._type_equality_funcs = {}
        self.addTypeEqualityFunc(dict, 'assertDictEqual')
        self.addTypeEqualityFunc(list, 'assertListEqual')
        self.addTypeEqualityFunc(tuple, 'assertTupleEqual')
        self.addTypeEqualityFunc(set, 'assertSetEqual')
        self.addTypeEqualityFunc(frozenset, 'assertSetEqual')
        try:
            self.addTypeEqualityFunc(unicode, 'assertMultiLineEqual')
        except NameError:
            # No unicode support in this build
            pass
def addTypeEqualityFunc(self, typeobj, function):
"""Add a type specific assertEqual style function to compare a type.
This method is for use by TestCase subclasses that need to register
their own type equality functions to provide nicer error messages.
Args:
typeobj: The data type to call this function on when both values
are of the same type in assertEqual().
function: The callable taking two arguments and an optional
msg= argument that raises self.failureException with a
useful error message when the two arguments are not equal.
"""
self._type_equality_funcs[typeobj] = function
def addCleanup(self, function, *args, **kwargs):
"""Add a function, with arguments, to be called when the test is
completed. Functions added are called on a LIFO basis and are
called after tearDown on test failure or success.
Cleanup items are called even if setUp fails (unlike tearDown)."""
self._cleanups.append((function, args, kwargs))
def setUp(self):
"Hook method for setting up the test fixture before exercising it."
pass
def tearDown(self):
"Hook method for deconstructing the test fixture after testing it."
pass
@classmethod
def setUpClass(cls):
"Hook method for setting up class fixture before running tests in the class."
@classmethod
def tearDownClass(cls):
"Hook method for deconstructing the class fixture after running all tests in the class."
def countTestCases(self):
return 1
def defaultTestResult(self):
return result.TestResult()
def shortDescription(self):
"""Returns a one-line description of the test, or None if no
description has been provided.
The default implementation of this method returns the first line of
the specified test method's docstring.
"""
doc = self._testMethodDoc
return doc and doc.split("\n")[0].strip() or None
def id(self):
return "%s.%s" % (strclass(self.__class__), self._testMethodName)
def __eq__(self, other):
if type(self) is not type(other):
return NotImplemented
return self._testMethodName == other._testMethodName
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((type(self), self._testMethodName))
def __str__(self):
return "%s (%s)" % (self._testMethodName, strclass(self.__class__))
def __repr__(self):
return "<%s testMethod=%s>" % \
(strclass(self.__class__), self._testMethodName)
def _addSkip(self, result, reason):
addSkip = getattr(result, 'addSkip', None)
if addSkip is not None:
addSkip(self, reason)
else:
warnings.warn("TestResult has no addSkip method, skips not reported",
RuntimeWarning, 2)
result.addSuccess(self)
    def run(self, result=None):
        # Execute the full test protocol: skip check, setUp, the test
        # method, tearDown and cleanups, recording each outcome on
        # *result*. When no result is supplied a default one is created
        # and startTestRun/stopTestRun are fired around the run.
        orig_result = result
        if result is None:
            result = self.defaultTestResult()
            startTestRun = getattr(result, 'startTestRun', None)
            if startTestRun is not None:
                startTestRun()

        self._resultForDoCleanups = result
        result.startTest(self)

        testMethod = getattr(self, self._testMethodName)
        if (getattr(self.__class__, "__unittest_skip__", False) or
            getattr(testMethod, "__unittest_skip__", False)):
            # If the class or method was skipped.
            try:
                skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
                            or getattr(testMethod, '__unittest_skip_why__', ''))
                self._addSkip(result, skip_why)
            finally:
                result.stopTest(self)
            return
        try:
            success = False
            try:
                self.setUp()
            except SkipTest as e:
                self._addSkip(result, str(e))
            except KeyboardInterrupt:
                # Never swallow a user interrupt.
                raise
            except:
                result.addError(self, sys.exc_info())
            else:
                # setUp succeeded: run the test method itself.
                try:
                    testMethod()
                except KeyboardInterrupt:
                    raise
                except self.failureException:
                    result.addFailure(self, sys.exc_info())
                except _ExpectedFailure as e:
                    # Raised by the expectedFailure decorator; fall back to
                    # addSuccess for TestResults lacking addExpectedFailure.
                    addExpectedFailure = getattr(result, 'addExpectedFailure', None)
                    if addExpectedFailure is not None:
                        addExpectedFailure(self, e.exc_info)
                    else:
                        warnings.warn("TestResult has no addExpectedFailure method, reporting as passes",
                                      RuntimeWarning)
                        result.addSuccess(self)
                except _UnexpectedSuccess:
                    addUnexpectedSuccess = getattr(result, 'addUnexpectedSuccess', None)
                    if addUnexpectedSuccess is not None:
                        addUnexpectedSuccess(self)
                    else:
                        warnings.warn("TestResult has no addUnexpectedSuccess method, reporting as failures",
                                      RuntimeWarning)
                        result.addFailure(self, sys.exc_info())
                except SkipTest as e:
                    self._addSkip(result, str(e))
                except:
                    result.addError(self, sys.exc_info())
                else:
                    success = True

                # tearDown runs only when setUp succeeded; a tearDown
                # error turns a passing test into an error.
                try:
                    self.tearDown()
                except KeyboardInterrupt:
                    raise
                except:
                    result.addError(self, sys.exc_info())
                    success = False

            # Cleanups run even when setUp failed (unlike tearDown).
            cleanUpSuccess = self.doCleanups()
            success = success and cleanUpSuccess
            if success:
                result.addSuccess(self)
        finally:
            result.stopTest(self)
            if orig_result is None:
                # We created the result ourselves; close the run.
                stopTestRun = getattr(result, 'stopTestRun', None)
                if stopTestRun is not None:
                    stopTestRun()
    def doCleanups(self):
        """Execute all cleanup functions. Normally called for you after
        tearDown.

        Pops and runs the addCleanup() callbacks in LIFO order. Any
        exception (other than KeyboardInterrupt) is recorded as an error
        on the result and the remaining cleanups still run. Returns True
        only when every cleanup succeeded."""
        result = self._resultForDoCleanups
        ok = True
        while self._cleanups:
            function, args, kwargs = self._cleanups.pop(-1)
            try:
                function(*args, **kwargs)
            except KeyboardInterrupt:
                raise
            except:
                ok = False
                result.addError(self, sys.exc_info())
        return ok
def __call__(self, *args, **kwds):
return self.run(*args, **kwds)
def debug(self):
"""Run the test without collecting errors in a TestResult"""
self.setUp()
getattr(self, self._testMethodName)()
self.tearDown()
while self._cleanups:
function, args, kwargs = self._cleanups.pop(-1)
function(*args, **kwargs)
def skipTest(self, reason):
"""Skip this test."""
raise SkipTest(reason)
def fail(self, msg=None):
"""Fail immediately, with the given message."""
raise self.failureException(msg)
def assertFalse(self, expr, msg=None):
"""Check that the expression is false."""
if expr:
msg = self._formatMessage(msg, "%s is not false" % safe_repr(expr))
raise self.failureException(msg)
def assertTrue(self, expr, msg=None):
"""Check that the expression is true."""
if not expr:
msg = self._formatMessage(msg, "%s is not true" % safe_repr(expr))
raise self.failureException(msg)
    def _formatMessage(self, msg, standardMsg):
        """Honour the longMessage attribute when generating failure messages.
        If longMessage is False this means:
        * Use only an explicit message if it is provided
        * Otherwise use the standard message for the assert

        If longMessage is True:
        * Use the standard message
        * If an explicit message is provided, plus ' : ' and the explicit message
        """
        if not self.longMessage:
            return msg or standardMsg
        if msg is None:
            return standardMsg
        try:
            # don't switch to '{}' formatting in Python 2.X
            # it changes the way unicode input is handled
            return '%s : %s' % (standardMsg, msg)
        except UnicodeDecodeError:
            # Python 2: mixing a non-ASCII byte string with unicode can
            # raise here; fall back to ASCII-safe reprs of both parts.
            return '%s : %s' % (safe_repr(standardMsg), safe_repr(msg))
    def assertRaises(self, excClass, callableObj=None, *args, **kwargs):
        """Fail unless an exception of class excClass is raised
        by callableObj when invoked with arguments args and keyword
        arguments kwargs. If a different type of exception is
        raised, it will not be caught, and the test case will be
        deemed to have suffered an error, exactly as for an
        unexpected exception.

        If called with callableObj omitted or None, will return a
        context object used like this::

            with self.assertRaises(SomeException):
                do_something()

        The context manager keeps a reference to the exception as
        the 'exception' attribute. This allows you to inspect the
        exception after the assertion::

            with self.assertRaises(SomeException) as cm:
                do_something()
            the_exception = cm.exception
            self.assertEqual(the_exception.error_code, 3)
        """
        context = _AssertRaisesContext(excClass, self)
        if callableObj is None:
            # Context-manager form: assertion happens in __exit__.
            return context
        with context:
            callableObj(*args, **kwargs)
    def _getAssertEqualityFunc(self, first, second):
        """Get a detailed comparison function for the types of the two args.

        Returns: A callable accepting (first, second, msg=None) that will
        raise a failure exception if first != second with a useful human
        readable error message for those types.
        """
        #
        # NOTE(gregory.p.smith): I considered isinstance(first, type(second))
        # and vice versa. I opted for the conservative approach in case
        # subclasses are not intended to be compared in detail to their super
        # class instances using a type equality func. This means testing
        # subtypes won't automagically use the detailed comparison. Callers
        # should use their type specific assertSpamEqual method to compare
        # subclasses if the detailed comparison is desired and appropriate.
        # See the discussion in http://bugs.python.org/issue2578.
        #
        if type(first) is type(second):
            asserter = self._type_equality_funcs.get(type(first))
            if asserter is not None:
                # Registered entries may be method-name strings (basestring
                # is Python 2's common str/unicode base); resolve them late
                # so subclass overrides are honoured.
                if isinstance(asserter, basestring):
                    asserter = getattr(self, asserter)
                return asserter

        return self._baseAssertEqual
def _baseAssertEqual(self, first, second, msg=None):
"""The default assertEqual implementation, not type specific."""
if not first == second:
standardMsg = '%s != %s' % (safe_repr(first), safe_repr(second))
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertEqual(self, first, second, msg=None):
"""Fail if the two objects are unequal as determined by the '=='
operator.
"""
assertion_func = self._getAssertEqualityFunc(first, second)
assertion_func(first, second, msg=msg)
def assertNotEqual(self, first, second, msg=None):
"""Fail if the two objects are equal as determined by the '!='
operator.
"""
if not first != second:
msg = self._formatMessage(msg, '%s == %s' % (safe_repr(first),
safe_repr(second)))
raise self.failureException(msg)
    def assertAlmostEqual(self, first, second, places=None, msg=None, delta=None):
        """Fail if the two objects are unequal as determined by their
        difference rounded to the given number of decimal places
        (default 7) and comparing to zero, or by comparing that the
        between the two objects is more than the given delta.

        Note that decimal places (from zero) are usually not the same
        as significant digits (measured from the most signficant digit).

        If the two objects compare equal then they will automatically
        compare almost equal.
        """
        if first == second:
            # shortcut
            return
        # delta and places are mutually exclusive ways to set tolerance.
        if delta is not None and places is not None:
            raise TypeError("specify delta or places not both")

        if delta is not None:
            if abs(first - second) <= delta:
                return
            standardMsg = '%s != %s within %s delta' % (safe_repr(first),
                                                        safe_repr(second),
                                                        safe_repr(delta))
        else:
            if places is None:
                places = 7
            if round(abs(second-first), places) == 0:
                return
            standardMsg = '%s != %s within %r places' % (safe_repr(first),
                                                         safe_repr(second),
                                                         places)
        msg = self._formatMessage(msg, standardMsg)
        raise self.failureException(msg)
    def assertNotAlmostEqual(self, first, second, places=None, msg=None, delta=None):
        """Fail if the two objects are equal as determined by their
        difference rounded to the given number of decimal places
        (default 7) and comparing to zero, or by comparing that the
        between the two objects is less than the given delta.

        Note that decimal places (from zero) are usually not the same
        as significant digits (measured from the most signficant digit).

        Objects that are equal automatically fail.
        """
        # delta and places are mutually exclusive ways to set tolerance.
        if delta is not None and places is not None:
            raise TypeError("specify delta or places not both")
        if delta is not None:
            if not (first == second) and abs(first - second) > delta:
                return
            standardMsg = '%s == %s within %s delta' % (safe_repr(first),
                                                        safe_repr(second),
                                                        safe_repr(delta))
        else:
            if places is None:
                places = 7
            if not (first == second) and round(abs(second-first), places) != 0:
                return
            standardMsg = '%s == %s within %r places' % (safe_repr(first),
                                                         safe_repr(second),
                                                         places)

        msg = self._formatMessage(msg, standardMsg)
        raise self.failureException(msg)
    # Synonyms for assertion methods

    # The plurals are undocumented. Keep them that way to discourage use.
    # Do not add more. Do not remove.
    # Going through a deprecation cycle on these would annoy many people.
    assertEquals = assertEqual
    assertNotEquals = assertNotEqual
    assertAlmostEquals = assertAlmostEqual
    assertNotAlmostEquals = assertNotAlmostEqual
    assert_ = assertTrue

    # These fail* assertion method names are pending deprecation and will
    # be a DeprecationWarning in 3.2; http://bugs.python.org/issue2578
    def _deprecate(original_func):
        # Class-body helper (note: no self). Wraps an assert method so each
        # call emits a PendingDeprecationWarning naming the replacement.
        def deprecated_func(*args, **kwargs):
            warnings.warn(
                'Please use {0} instead.'.format(original_func.__name__),
                PendingDeprecationWarning, 2)
            return original_func(*args, **kwargs)
        return deprecated_func

    failUnlessEqual = _deprecate(assertEqual)
    failIfEqual = _deprecate(assertNotEqual)
    failUnlessAlmostEqual = _deprecate(assertAlmostEqual)
    failIfAlmostEqual = _deprecate(assertNotAlmostEqual)
    failUnless = _deprecate(assertTrue)
    failUnlessRaises = _deprecate(assertRaises)
    failIf = _deprecate(assertFalse)
    def assertSequenceEqual(self, seq1, seq2, msg=None, seq_type=None):
        """An equality assertion for ordered sequences (like lists and tuples).

        For the purposes of this function, a valid ordered sequence type is one
        which can be indexed, has a length, and has an equality operator.

        Args:
            seq1: The first sequence to compare.
            seq2: The second sequence to compare.
            seq_type: The expected datatype of the sequences, or None if no
                    datatype should be enforced.
            msg: Optional message to use on failure instead of a list of
                    differences.
        """
        if seq_type is not None:
            # Enforce the concrete type before comparing contents.
            seq_type_name = seq_type.__name__
            if not isinstance(seq1, seq_type):
                raise self.failureException('First sequence is not a %s: %s'
                                        % (seq_type_name, safe_repr(seq1)))
            if not isinstance(seq2, seq_type):
                raise self.failureException('Second sequence is not a %s: %s'
                                        % (seq_type_name, safe_repr(seq2)))
        else:
            seq_type_name = "sequence"

        differing = None
        try:
            len1 = len(seq1)
        except (TypeError, NotImplementedError):
            differing = 'First %s has no length. Non-sequence?' % (
                    seq_type_name)

        if differing is None:
            try:
                len2 = len(seq2)
            except (TypeError, NotImplementedError):
                differing = 'Second %s has no length. Non-sequence?' % (
                        seq_type_name)

        if differing is None:
            if seq1 == seq2:
                return

            # Build a description of the first point of difference.
            seq1_repr = safe_repr(seq1)
            seq2_repr = safe_repr(seq2)
            if len(seq1_repr) > 30:
                seq1_repr = seq1_repr[:30] + '...'
            if len(seq2_repr) > 30:
                seq2_repr = seq2_repr[:30] + '...'
            elements = (seq_type_name.capitalize(), seq1_repr, seq2_repr)
            differing = '%ss differ: %s != %s\n' % elements

            for i in xrange(min(len1, len2)):
                try:
                    item1 = seq1[i]
                except (TypeError, IndexError, NotImplementedError):
                    differing += ('\nUnable to index element %d of first %s\n' %
                                 (i, seq_type_name))
                    break

                try:
                    item2 = seq2[i]
                except (TypeError, IndexError, NotImplementedError):
                    differing += ('\nUnable to index element %d of second %s\n' %
                                 (i, seq_type_name))
                    break

                if item1 != item2:
                    differing += ('\nFirst differing element %d:\n%s\n%s\n' %
                                 (i, item1, item2))
                    break
            else:
                if (len1 == len2 and seq_type is None and
                    type(seq1) != type(seq2)):
                    # The sequences are the same, but have differing types.
                    return

            # Prefixes of equal length compared equal; report the extras.
            if len1 > len2:
                differing += ('\nFirst %s contains %d additional '
                             'elements.\n' % (seq_type_name, len1 - len2))
                try:
                    differing += ('First extra element %d:\n%s\n' %
                                  (len2, seq1[len2]))
                except (TypeError, IndexError, NotImplementedError):
                    differing += ('Unable to index element %d '
                                  'of first %s\n' % (len2, seq_type_name))
            elif len1 < len2:
                differing += ('\nSecond %s contains %d additional '
                             'elements.\n' % (seq_type_name, len2 - len1))
                try:
                    differing += ('First extra element %d:\n%s\n' %
                                  (len1, seq2[len1]))
                except (TypeError, IndexError, NotImplementedError):
                    differing += ('Unable to index element %d '
                                  'of second %s\n' % (len1, seq_type_name))
        standardMsg = differing
        # Append a (possibly truncated) ndiff of the pretty-printed values.
        diffMsg = '\n' + '\n'.join(
            difflib.ndiff(pprint.pformat(seq1).splitlines(),
                          pprint.pformat(seq2).splitlines()))
        standardMsg = self._truncateMessage(standardMsg, diffMsg)
        msg = self._formatMessage(msg, standardMsg)
        self.fail(msg)
def _truncateMessage(self, message, diff):
max_diff = self.maxDiff
if max_diff is None or len(diff) <= max_diff:
return message + diff
return message + (DIFF_OMITTED % len(diff))
def assertListEqual(self, list1, list2, msg=None):
"""A list-specific equality assertion.
Args:
list1: The first list to compare.
list2: The second list to compare.
msg: Optional message to use on failure instead of a list of
differences.
"""
self.assertSequenceEqual(list1, list2, msg, seq_type=list)
def assertTupleEqual(self, tuple1, tuple2, msg=None):
"""A tuple-specific equality assertion.
Args:
tuple1: The first tuple to compare.
tuple2: The second tuple to compare.
msg: Optional message to use on failure instead of a list of
differences.
"""
self.assertSequenceEqual(tuple1, tuple2, msg, seq_type=tuple)
    def assertSetEqual(self, set1, set2, msg=None):
        """A set-specific equality assertion.

        Args:
            set1: The first set to compare.
            set2: The second set to compare.
            msg: Optional message to use on failure instead of a list of
                    differences.

        assertSetEqual uses ducktyping to support different types of sets, and
        is optimized for sets specifically (parameters must support a
        difference method).
        """
        # NOTE: Python 2 'except X, e' syntax throughout this method.
        try:
            difference1 = set1.difference(set2)
        except TypeError, e:
            self.fail('invalid type when attempting set difference: %s' % e)
        except AttributeError, e:
            self.fail('first argument does not support set difference: %s' % e)

        try:
            difference2 = set2.difference(set1)
        except TypeError, e:
            self.fail('invalid type when attempting set difference: %s' % e)
        except AttributeError, e:
            self.fail('second argument does not support set difference: %s' % e)

        if not (difference1 or difference2):
            # Both one-way differences empty: the sets are equal.
            return

        lines = []
        if difference1:
            lines.append('Items in the first set but not the second:')
            for item in difference1:
                lines.append(repr(item))
        if difference2:
            lines.append('Items in the second set but not the first:')
            for item in difference2:
                lines.append(repr(item))

        standardMsg = '\n'.join(lines)
        self.fail(self._formatMessage(msg, standardMsg))
def assertIn(self, member, container, msg=None):
"""Just like self.assertTrue(a in b), but with a nicer default message."""
if member not in container:
standardMsg = '%s not found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIn(self, member, container, msg=None):
"""Just like self.assertTrue(a not in b), but with a nicer default message."""
if member in container:
standardMsg = '%s unexpectedly found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertIs(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is b), but with a nicer default message."""
if expr1 is not expr2:
standardMsg = '%s is not %s' % (safe_repr(expr1),
safe_repr(expr2))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNot(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is not b), but with a nicer default message."""
if expr1 is expr2:
standardMsg = 'unexpectedly identical: %s' % (safe_repr(expr1),)
self.fail(self._formatMessage(msg, standardMsg))
def assertDictEqual(self, d1, d2, msg=None):
self.assertIsInstance(d1, dict, 'First argument is not a dictionary')
self.assertIsInstance(d2, dict, 'Second argument is not a dictionary')
if d1 != d2:
standardMsg = '%s != %s' % (safe_repr(d1, True), safe_repr(d2, True))
diff = ('\n' + '\n'.join(difflib.ndiff(
pprint.pformat(d1).splitlines(),
pprint.pformat(d2).splitlines())))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
    def assertDictContainsSubset(self, expected, actual, msg=None):
        """Checks whether actual is a superset of expected."""
        missing = []
        mismatched = []
        # iteritems: Python 2 dict iterator.
        for key, value in expected.iteritems():
            if key not in actual:
                missing.append(key)
            elif value != actual[key]:
                mismatched.append('%s, expected: %s, actual: %s' %
                                  (safe_repr(key), safe_repr(value),
                                   safe_repr(actual[key])))

        if not (missing or mismatched):
            return

        # Combine both failure categories into one message.
        standardMsg = ''
        if missing:
            standardMsg = 'Missing: %s' % ','.join(safe_repr(m) for m in
                                                    missing)
        if mismatched:
            if standardMsg:
                standardMsg += '; '
            standardMsg += 'Mismatched values: %s' % ','.join(mismatched)

        self.fail(self._formatMessage(msg, standardMsg))
    def assertItemsEqual(self, expected_seq, actual_seq, msg=None):
        """An unordered sequence specific comparison. It asserts that
        actual_seq and expected_seq have the same element counts.
        Equivalent to::

            self.assertEqual(Counter(iter(actual_seq)),
                             Counter(iter(expected_seq)))

        Asserts that each element has the same count in both sequences.
        Example:
            - [0, 1, 1] and [1, 0, 1] compare equal.
            - [0, 0, 1] and [0, 1] compare unequal.
        """
        first_seq, second_seq = list(expected_seq), list(actual_seq)
        with warnings.catch_warnings():
            # sys.py3kwarning exists on Python 2 only (-3 flag).
            if sys.py3kwarning:
                # Silence Py3k warning raised during the sorting
                for _msg in ["(code|dict|type) inequality comparisons",
                             "builtin_function_or_method order comparisons",
                             "comparing unequal types"]:
                    warnings.filterwarnings("ignore", _msg, DeprecationWarning)
            try:
                first = collections.Counter(first_seq)
                second = collections.Counter(second_seq)
            except TypeError:
                # Handle case with unhashable elements
                differences = _count_diff_all_purpose(first_seq, second_seq)
            else:
                if first == second:
                    return
                differences = _count_diff_hashable(first_seq, second_seq)

        if differences:
            standardMsg = 'Element counts were not equal:\n'
            lines = ['First has %d, Second has %d: %r' % diff for diff in differences]
            diffMsg = '\n'.join(lines)
            standardMsg = self._truncateMessage(standardMsg, diffMsg)
            msg = self._formatMessage(msg, standardMsg)
            self.fail(msg)
    def assertMultiLineEqual(self, first, second, msg=None):
        """Assert that two multi-line strings are equal."""
        # basestring: Python 2 common base of str and unicode.
        self.assertIsInstance(first, basestring,
                'First argument is not a string')
        self.assertIsInstance(second, basestring,
                'Second argument is not a string')

        if first != second:
            # don't use difflib if the strings are too long
            if (len(first) > self._diffThreshold or
                len(second) > self._diffThreshold):
                # first != second here, so this always raises.
                self._baseAssertEqual(first, second, msg)
            firstlines = first.splitlines(True)
            secondlines = second.splitlines(True)
            if len(firstlines) == 1 and first.strip('\r\n') == first:
                # Single-line inputs: add newlines so ndiff output reads well.
                firstlines = [first + '\n']
                secondlines = [second + '\n']
            standardMsg = '%s != %s' % (safe_repr(first, True),
                                        safe_repr(second, True))
            diff = '\n' + ''.join(difflib.ndiff(firstlines, secondlines))
            standardMsg = self._truncateMessage(standardMsg, diff)
            self.fail(self._formatMessage(msg, standardMsg))
def assertLess(self, a, b, msg=None):
"""Just like self.assertTrue(a < b), but with a nicer default message."""
if not a < b:
standardMsg = '%s not less than %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertLessEqual(self, a, b, msg=None):
"""Just like self.assertTrue(a <= b), but with a nicer default message."""
if not a <= b:
standardMsg = '%s not less than or equal to %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertGreater(self, a, b, msg=None):
"""Just like self.assertTrue(a > b), but with a nicer default message."""
if not a > b:
standardMsg = '%s not greater than %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertGreaterEqual(self, a, b, msg=None):
"""Just like self.assertTrue(a >= b), but with a nicer default message."""
if not a >= b:
standardMsg = '%s not greater than or equal to %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNone(self, obj, msg=None):
"""Same as self.assertTrue(obj is None), with a nicer default message."""
if obj is not None:
standardMsg = '%s is not None' % (safe_repr(obj),)
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNotNone(self, obj, msg=None):
"""Included for symmetry with assertIsNone."""
if obj is None:
standardMsg = 'unexpectedly None'
self.fail(self._formatMessage(msg, standardMsg))
def assertIsInstance(self, obj, cls, msg=None):
"""Same as self.assertTrue(isinstance(obj, cls)), with a nicer
default message."""
if not isinstance(obj, cls):
standardMsg = '%s is not an instance of %r' % (safe_repr(obj), cls)
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIsInstance(self, obj, cls, msg=None):
"""Included for symmetry with assertIsInstance."""
if isinstance(obj, cls):
standardMsg = '%s is an instance of %r' % (safe_repr(obj), cls)
self.fail(self._formatMessage(msg, standardMsg))
    def assertRaisesRegexp(self, expected_exception, expected_regexp,
                           callable_obj=None, *args, **kwargs):
        """Asserts that the message in a raised exception matches a regexp.

        Args:
            expected_exception: Exception class expected to be raised.
            expected_regexp: Regexp (re pattern object or string) expected
                    to be found in error message.
            callable_obj: Function to be called.
            args: Extra args.
            kwargs: Extra kwargs.
        """
        context = _AssertRaisesContext(expected_exception, self, expected_regexp)
        if callable_obj is None:
            # Context-manager form: both the raise and the regexp match
            # are checked in _AssertRaisesContext.__exit__.
            return context
        with context:
            callable_obj(*args, **kwargs)
    def assertRegexpMatches(self, text, expected_regexp, msg=None):
        """Fail the test unless the text matches the regular expression."""
        # Accept either a pattern string (basestring: Python 2 str/unicode)
        # or an already-compiled pattern object.
        if isinstance(expected_regexp, basestring):
            expected_regexp = re.compile(expected_regexp)
        if not expected_regexp.search(text):
            msg = msg or "Regexp didn't match"
            msg = '%s: %r not found in %r' % (msg, expected_regexp.pattern, text)
            raise self.failureException(msg)
    def assertNotRegexpMatches(self, text, unexpected_regexp, msg=None):
        """Fail the test if the text matches the regular expression."""
        if isinstance(unexpected_regexp, basestring):  # Python 2 str/unicode
            unexpected_regexp = re.compile(unexpected_regexp)
        match = unexpected_regexp.search(text)
        if match:
            # Include the offending substring in the failure message.
            msg = msg or "Regexp matched"
            msg = '%s: %r matches %r in %r' % (msg,
                                               text[match.start():match.end()],
                                               unexpected_regexp.pattern,
                                               text)
            raise self.failureException(msg)
class FunctionTestCase(TestCase):
    """A test case that wraps a test function.

    This is useful for slipping pre-existing test functions into the
    unittest framework. Optionally, set-up and tidy-up functions can be
    supplied. As with TestCase, the tidy-up ('tearDown') function will
    always be called if the set-up ('setUp') function ran successfully.
    """

    def __init__(self, testFunc, setUp=None, tearDown=None, description=None):
        # description, when given, overrides the docstring-derived
        # shortDescription().
        super(FunctionTestCase, self).__init__()
        self._setUpFunc = setUp
        self._tearDownFunc = tearDown
        self._testFunc = testFunc
        self._description = description

    def setUp(self):
        if self._setUpFunc is not None:
            self._setUpFunc()

    def tearDown(self):
        if self._tearDownFunc is not None:
            self._tearDownFunc()

    def runTest(self):
        # The wrapped function is the entire test body.
        self._testFunc()

    def id(self):
        return self._testFunc.__name__

    def __eq__(self, other):
        # Equal only to another FunctionTestCase wrapping the same three
        # callables and description.
        if not isinstance(other, self.__class__):
            return NotImplemented

        return self._setUpFunc == other._setUpFunc and \
               self._tearDownFunc == other._tearDownFunc and \
               self._testFunc == other._testFunc and \
               self._description == other._description

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash((type(self), self._setUpFunc, self._tearDownFunc,
                     self._testFunc, self._description))

    def __str__(self):
        return "%s (%s)" % (strclass(self.__class__),
                            self._testFunc.__name__)

    def __repr__(self):
        return "<%s tec=%s>" % (strclass(self.__class__),
                                self._testFunc)

    def shortDescription(self):
        if self._description is not None:
            return self._description
        # Fall back to the first line of the wrapped function's docstring.
        doc = self._testFunc.__doc__
        return doc and doc.split("\n")[0].strip() or None
| apache-2.0 |
jostep/tensorflow | tensorflow/python/kernel_tests/string_join_op_test.py | 134 | 1896 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for string_join_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class StringJoinOpTest(test.TestCase):
  """Exercises string_ops.string_join with scalar and tensor inputs."""
  def testStringJoin(self):
    input0 = ["a", "b"]
    input1 = "a"
    input2 = [["b"], ["c"]]
    with self.test_session():
      # A scalar input broadcasts against vector inputs element-wise.
      output = string_ops.string_join([input0, input1])
      self.assertAllEqual(output.eval(), [b"aa", b"ba"])
      output = string_ops.string_join([input0, input1], separator="--")
      self.assertAllEqual(output.eval(), [b"a--a", b"b--a"])
      output = string_ops.string_join([input0, input1, input0], separator="--")
      self.assertAllEqual(output.eval(), [b"a--a--a", b"b--a--b"])
      # Joining only scalars yields a scalar.
      output = string_ops.string_join([input1] * 4, separator="!")
      self.assertEqual(output.eval(), b"a!a!a!a")
      # 2-D inputs are joined element-wise, preserving shape.
      output = string_ops.string_join([input2] * 2, separator="")
      self.assertAllEqual(output.eval(), [[b"bb"], [b"cc"]])
      with self.assertRaises(ValueError):  # Inconsistent shapes
        string_ops.string_join([input0, input2]).eval()
if __name__ == "__main__":
test.main()
| apache-2.0 |
Uh-huh-Philip/ShadowDNS | test.py | 79 | 1492 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
sys.path.insert(0, 'shadowsocks')
import os
import signal
import select
import time
from subprocess import Popen, PIPE
# Integration test (Python 2 only: see the `print` statement below).
# Start the DNS relay and a shadowsocks server, wait for both to report
# ready on stdout/stderr, then resolve a name through the local relay.
p1 = Popen(['sudo', sys.executable, 'shadowdns/dnsrelay.py', '-c', sys.argv[-1]],
           shell=False, bufsize=0, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
p2 = Popen(['ssserver', '-c', sys.argv[-1]], shell=False, bufsize=0, stdin=PIPE,
           stdout=PIPE, stderr=PIPE, close_fds=True, env=os.environ)
p3 = None
try:
    local_ready = False
    server_ready = False
    fdset = [p1.stdout, p2.stdout, p1.stderr, p2.stderr]
    while True:
        r, w, e = select.select(fdset, [], fdset)
        if e:
            break
        for fd in r:
            line = fd.readline()
            sys.stdout.write(line)
            # Watch the children's output for their startup banners.
            if line.find('starting dns') >= 0:
                local_ready = True
            if line.find('starting server') >= 0:
                server_ready = True
        if local_ready and server_ready and p3 is None:
            time.sleep(1)
            # Both sides are up: issue a query through the relay.
            p3 = Popen(['dig', '@127.0.0.1', 'any', 'google.com'],
                       shell=False, bufsize=0, close_fds=True)
            break
    if p3 is not None:
        r = p3.wait()
        if r == 0:
            print 'test passed'
        # Propagate dig's exit status as the test result.
        sys.exit(r)
finally:
    # Best-effort cleanup: terminate both child processes.
    for p in [p1, p2]:
        try:
            os.kill(p.pid, signal.SIGTERM)
        except OSError:
            pass
# Only reached when the readiness banners never appeared.
sys.exit(-1)
| mit |
PublicaMundi/pycsw | pycsw/formats/__init__.py | 4 | 1317 | # -*- coding: ISO-8859-15 -*-
# =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
#
# Copyright (c) 2012 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
| mit |
sferik/rubinius | vendor/libffi/generate-darwin-source-and-headers.py | 87 | 6613 | #!/usr/bin/env python
import subprocess
import os
import errno
import collections
import glob
import argparse
class Platform(object):
    # Marker base class; concrete platforms define directory/sdk/arch/triple/
    # version_min/prefix/suffix/src_dir/src_files as class attributes.
    pass
class simulator_platform(Platform):
    # Build settings for the 32-bit (i386) iOS simulator slice.
    directory = 'darwin_ios'
    sdk = 'iphonesimulator'
    arch = 'i386'
    triple = 'i386-apple-darwin11'
    version_min = '-miphoneos-version-min=5.1.1'
    # Guards wrapped around generated files so only this arch compiles them.
    prefix = "#ifdef __i386__\n\n"
    suffix = "\n\n#endif"
    src_dir = 'x86'
    src_files = ['darwin.S', 'win32.S', 'ffi.c']
class simulator64_platform(Platform):
    # Build settings for the 64-bit (x86_64) iOS simulator slice.
    directory = 'darwin_ios'
    sdk = 'iphonesimulator'
    arch = 'x86_64'
    triple = 'x86_64-apple-darwin13'
    version_min = '-miphoneos-version-min=7.0'
    prefix = "#ifdef __x86_64__\n\n"
    suffix = "\n\n#endif"
    src_dir = 'x86'
    src_files = ['darwin64.S', 'ffi64.c']
class device_platform(Platform):
    # Build settings for 32-bit ARM iOS devices.
    directory = 'darwin_ios'
    sdk = 'iphoneos'
    arch = 'armv7'
    triple = 'arm-apple-darwin11'
    version_min = '-miphoneos-version-min=5.1.1'
    prefix = "#ifdef __arm__\n\n"
    suffix = "\n\n#endif"
    src_dir = 'arm'
    # trampoline.S is regenerated by make_tramp() before building.
    src_files = ['sysv.S', 'trampoline.S', 'ffi.c']
class device64_platform(Platform):
    # Build settings for 64-bit ARM iOS devices.
    directory = 'darwin_ios'
    sdk = 'iphoneos'
    arch = 'arm64'
    triple = 'aarch64-apple-darwin13'
    version_min = '-miphoneos-version-min=7.0'
    prefix = "#ifdef __arm64__\n\n"
    suffix = "\n\n#endif"
    src_dir = 'aarch64'
    src_files = ['sysv.S', 'ffi.c']
class desktop32_platform(Platform):
    # Build settings for 32-bit (i386) OS X.
    directory = 'darwin_osx'
    sdk = 'macosx'
    arch = 'i386'
    triple = 'i386-apple-darwin10'
    version_min = '-mmacosx-version-min=10.6'
    src_dir = 'x86'
    src_files = ['darwin.S', 'win32.S', 'ffi.c']
    prefix = "#ifdef __i386__\n\n"
    suffix = "\n\n#endif"
class desktop64_platform(Platform):
    # Build settings for 64-bit (x86_64) OS X.
    directory = 'darwin_osx'
    sdk = 'macosx'
    arch = 'x86_64'
    triple = 'x86_64-apple-darwin10'
    version_min = '-mmacosx-version-min=10.6'
    prefix = "#ifdef __x86_64__\n\n"
    suffix = "\n\n#endif"
    src_dir = 'x86'
    src_files = ['darwin64.S', 'ffi64.c']
def mkdir_p(path):
    """Create *path* and any missing parents, like ``mkdir -p``.

    An already-existing directory is not an error; any other OSError
    propagates to the caller.
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        # Only swallow "already exists"; re-raise real failures.
        if exc.errno != errno.EEXIST:
            raise
def move_file(src_dir, dst_dir, filename, file_suffix=None, prefix='', suffix=''):
    """Copy ``src_dir/filename`` into *dst_dir*, optionally renaming and wrapping.

    When *file_suffix* is given, ``name.ext`` is written as
    ``name_<file_suffix>.ext``.  *prefix* and *suffix* are written before
    and after the copied contents (used for #ifdef guards).
    """
    mkdir_p(dst_dir)
    if file_suffix:
        root, ext = os.path.splitext(filename)
        out_filename = "%s_%s%s" % (root, file_suffix, ext)
    else:
        out_filename = filename
    src_path = os.path.join(src_dir, filename)
    dst_path = os.path.join(dst_dir, out_filename)
    with open(src_path) as in_file, open(dst_path, 'w') as out_file:
        if prefix:
            out_file.write(prefix)
        out_file.write(in_file.read())
        if suffix:
            out_file.write(suffix)
def list_files(src_dir, pattern=None, filelist=None):
    """Yield the basename of every selected file.

    Files come either from glob *pattern* applied under *src_dir* or,
    when no pattern is given, from the explicit *filelist*.
    """
    if pattern:
        filelist = glob.iglob(os.path.join(src_dir, pattern))
    for path in filelist:
        yield os.path.basename(path)
def copy_files(src_dir, dst_dir, pattern=None, filelist=None, file_suffix=None, prefix=None, suffix=None):
    """Copy every selected file from *src_dir* to *dst_dir* via move_file()."""
    names = list_files(src_dir, pattern=pattern, filelist=filelist)
    for name in names:
        move_file(src_dir, dst_dir, name,
                  file_suffix=file_suffix, prefix=prefix, suffix=suffix)
def copy_src_platform_files(platform):
    # Copy the arch-specific libffi sources, tagging each file name with the
    # architecture and wrapping the contents in the platform's #ifdef guards.
    src_dir = os.path.join('src', platform.src_dir)
    dst_dir = os.path.join(platform.directory, 'src', platform.src_dir)
    copy_files(src_dir, dst_dir, filelist=platform.src_files, file_suffix=platform.arch, prefix=platform.prefix, suffix=platform.suffix)
def build_target(platform, platform_headers):
    # Configure libffi for one platform/arch in build_<sdk>-<arch>, then
    # collect the generated headers (arch-tagged and #ifdef-wrapped) and
    # record each header's variants in *platform_headers*.
    def xcrun_cmd(cmd):
        # Resolve a toolchain command through xcrun for this SDK/arch.
        return 'xcrun -sdk %s %s -arch %s' % (platform.sdk, cmd, platform.arch)
    tag='%s-%s' % (platform.sdk, platform.arch)
    build_dir = 'build_%s' % tag
    mkdir_p(build_dir)
    env = dict(CC=xcrun_cmd('clang'),
               LD=xcrun_cmd('ld'),
               CFLAGS='%s' % (platform.version_min))
    working_dir = os.getcwd()
    try:
        os.chdir(build_dir)
        subprocess.check_call(['../configure', '-host', platform.triple], env=env)
    finally:
        # Always restore the original cwd, even if configure fails.
        os.chdir(working_dir)
    for src_dir in [build_dir, os.path.join(build_dir, 'include')]:
        copy_files(src_dir,
                   os.path.join(platform.directory, 'include'),
                   pattern='*.h',
                   file_suffix=platform.arch,
                   prefix=platform.prefix,
                   suffix=platform.suffix)
        for filename in list_files(src_dir, pattern='*.h'):
            # Remember which (prefix, arch, suffix) variants exist per header
            # so a dispatching umbrella header can be generated later.
            platform_headers[filename].add((platform.prefix, platform.arch, platform.suffix))
def make_tramp():
    # Regenerate the ARM trampoline assembly from the gentramp.sh script.
    with open('src/arm/trampoline.S', 'w') as tramp_out:
        p = subprocess.Popen(['bash', 'src/arm/gentramp.sh'], stdout=tramp_out)
        p.wait()
def generate_source_and_headers(generate_osx=True, generate_ios=True):
    # Stage the platform-independent sources/headers, build every requested
    # slice, then emit umbrella headers that dispatch per architecture.
    copy_files('src', 'darwin_common/src', pattern='*.c')
    copy_files('include', 'darwin_common/include', pattern='*.h')
    if generate_ios:
        make_tramp()
        copy_src_platform_files(simulator_platform)
        copy_src_platform_files(simulator64_platform)
        copy_src_platform_files(device_platform)
        copy_src_platform_files(device64_platform)
    if generate_osx:
        copy_src_platform_files(desktop32_platform)
        copy_src_platform_files(desktop64_platform)
    platform_headers = collections.defaultdict(set)
    if generate_ios:
        build_target(simulator_platform, platform_headers)
        build_target(simulator64_platform, platform_headers)
        build_target(device_platform, platform_headers)
        build_target(device64_platform, platform_headers)
    if generate_osx:
        build_target(desktop32_platform, platform_headers)
        build_target(desktop64_platform, platform_headers)
    mkdir_p('darwin_common/include')
    # Write umbrella headers that #include the matching arch-specific header
    # behind each platform's #ifdef guard.
    # NOTE: dict.iteritems() makes this function Python 2 only.
    for header_name, tag_tuples in platform_headers.iteritems():
        basename, suffix = os.path.splitext(header_name)
        with open(os.path.join('darwin_common/include', header_name), 'w') as header:
            for tag_tuple in tag_tuples:
                header.write('%s#include <%s_%s%s>\n%s\n' % (tag_tuple[0], basename, tag_tuple[1], suffix, tag_tuple[2]))
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # By default both OS X and iOS artifacts are generated; each flag
    # restricts the run to the other family.
    parser.add_argument('--only-ios', action='store_true', default=False)
    parser.add_argument('--only-osx', action='store_true', default=False)
    args = parser.parse_args()
    generate_source_and_headers(generate_osx=not args.only_ios, generate_ios=not args.only_osx)
| bsd-3-clause |
sumedh123/debatify | venv/lib/python2.7/site-packages/click/_termui_impl.py | 64 | 16377 | """
click._termui_impl
~~~~~~~~~~~~~~~~~~
This module contains implementations for the termui module. To keep the
import time of Click down, some infrequently used functionality is placed
in this module and only imported as needed.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import time
import math
from ._compat import _default_text_stdout, range_type, PY2, isatty, \
open_stream, strip_ansi, term_len, get_best_encoding, WIN
from .utils import echo
from .exceptions import ClickException
if os.name == 'nt':
    BEFORE_BAR = '\r'
    AFTER_BAR = '\n'
else:
    # On POSIX terminals also hide (\033[?25l) and restore (\033[?25h) the
    # cursor around progress-bar redraws.
    BEFORE_BAR = '\r\033[?25l'
    AFTER_BAR = '\033[?25h\n'
def _length_hint(obj):
"""Returns the length hint of an object."""
try:
return len(obj)
except TypeError:
try:
get_hint = type(obj).__length_hint__
except AttributeError:
return None
try:
hint = get_hint(obj)
except TypeError:
return None
if hint is NotImplemented or \
not isinstance(hint, (int, long)) or \
hint < 0:
return None
return hint
class ProgressBar(object):
    """Renders an updating progress bar on a stream.

    Instances are used as context managers and iterated over; each
    iteration advances the bar by one step and redraws it.
    """
    def __init__(self, iterable, length=None, fill_char='#', empty_char=' ',
                 bar_template='%(bar)s', info_sep=' ', show_eta=True,
                 show_percent=None, show_pos=False, item_show_func=None,
                 label=None, file=None, color=None, width=30):
        self.fill_char = fill_char
        self.empty_char = empty_char
        self.bar_template = bar_template
        self.info_sep = info_sep
        self.show_eta = show_eta
        self.show_percent = show_percent
        self.show_pos = show_pos
        self.item_show_func = item_show_func
        self.label = label or ''
        if file is None:
            file = _default_text_stdout()
        self.file = file
        self.color = color
        self.width = width
        # width == 0 means "auto-size to the terminal width".
        self.autowidth = width == 0
        if length is None:
            length = _length_hint(iterable)
        if iterable is None:
            if length is None:
                raise TypeError('iterable or length is required')
            iterable = range_type(length)
        self.iter = iter(iterable)
        self.length = length
        self.length_known = length is not None
        self.pos = 0
        # Sliding window of recent per-item timings used for the ETA.
        self.avg = []
        self.start = self.last_eta = time.time()
        self.eta_known = False
        self.finished = False
        self.max_width = None
        self.entered = False
        self.current_item = None
        # Without a terminal only the label is printed (once).
        self.is_hidden = not isatty(self.file)
        self._last_line = None
    def __enter__(self):
        self.entered = True
        self.render_progress()
        return self
    def __exit__(self, exc_type, exc_value, tb):
        self.render_finish()
    def __iter__(self):
        # Iteration outside the `with` block would skip terminal cleanup.
        if not self.entered:
            raise RuntimeError('You need to use progress bars in a with block.')
        self.render_progress()
        return self
    def render_finish(self):
        if self.is_hidden:
            return
        # Restore the cursor and move to the next line.
        self.file.write(AFTER_BAR)
        self.file.flush()
    @property
    def pct(self):
        # Fraction completed, clamped to [0, 1].
        if self.finished:
            return 1.0
        return min(self.pos / (float(self.length) or 1), 1.0)
    @property
    def time_per_iteration(self):
        if not self.avg:
            return 0.0
        return sum(self.avg) / float(len(self.avg))
    @property
    def eta(self):
        # Seconds remaining, 0.0 when unknown or already done.
        if self.length_known and not self.finished:
            return self.time_per_iteration * (self.length - self.pos)
        return 0.0
    def format_eta(self):
        # Render the ETA as [Dd ]HH:MM:SS, or '' when unknown.
        if self.eta_known:
            t = self.eta + 1
            seconds = t % 60
            t /= 60
            minutes = t % 60
            t /= 60
            hours = t % 24
            t /= 24
            if t > 0:
                days = t
                return '%dd %02d:%02d:%02d' % (days, hours, minutes, seconds)
            else:
                return '%02d:%02d:%02d' % (hours, minutes, seconds)
        return ''
    def format_pos(self):
        # "pos" or "pos/length" when the total is known.
        pos = str(self.pos)
        if self.length_known:
            pos += '/%s' % self.length
        return pos
    def format_pct(self):
        # Right-aligned percentage, e.g. ' 42%'.
        return ('% 4d%%' % int(self.pct * 100))[1:]
    def format_progress_line(self):
        show_percent = self.show_percent
        info_bits = []
        if self.length_known:
            bar_length = int(self.pct * self.width)
            bar = self.fill_char * bar_length
            bar += self.empty_char * (self.width - bar_length)
            if show_percent is None:
                show_percent = not self.show_pos
        else:
            # Unknown length: show a bouncing marker instead of a fill.
            if self.finished:
                bar = self.fill_char * self.width
            else:
                bar = list(self.empty_char * (self.width or 1))
                if self.time_per_iteration != 0:
                    bar[int((math.cos(self.pos * self.time_per_iteration)
                             / 2.0 + 0.5) * self.width)] = self.fill_char
                bar = ''.join(bar)
        if self.show_pos:
            info_bits.append(self.format_pos())
        if show_percent:
            info_bits.append(self.format_pct())
        if self.show_eta and self.eta_known and not self.finished:
            info_bits.append(self.format_eta())
        if self.item_show_func is not None:
            item_info = self.item_show_func(self.current_item)
            if item_info is not None:
                info_bits.append(item_info)
        return (self.bar_template % {
            'label': self.label,
            'bar': bar,
            'info': self.info_sep.join(info_bits)
        }).rstrip()
    def render_progress(self):
        from .termui import get_terminal_size
        nl = False
        if self.is_hidden:
            # Non-interactive output: print only the label, once per change.
            buf = [self.label]
            nl = True
        else:
            buf = []
            # Update width in case the terminal has been resized
            if self.autowidth:
                old_width = self.width
                self.width = 0
                clutter_length = term_len(self.format_progress_line())
                new_width = max(0, get_terminal_size()[0] - clutter_length)
                if new_width < old_width:
                    # Terminal shrank: blank out the previously drawn line.
                    buf.append(BEFORE_BAR)
                    buf.append(' ' * self.max_width)
                self.max_width = new_width
                self.width = new_width
            clear_width = self.width
            if self.max_width is not None:
                clear_width = self.max_width
            buf.append(BEFORE_BAR)
            line = self.format_progress_line()
            line_len = term_len(line)
            if self.max_width is None or self.max_width < line_len:
                self.max_width = line_len
            buf.append(line)
            # Pad with spaces to erase leftovers from a longer previous line.
            buf.append(' ' * (clear_width - line_len))
        line = ''.join(buf)
        # Render the line only if it changed.
        if line != self._last_line:
            self._last_line = line
            echo(line, file=self.file, color=self.color, nl=nl)
            self.file.flush()
    def make_step(self, n_steps):
        self.pos += n_steps
        if self.length_known and self.pos >= self.length:
            self.finished = True
        # Throttle ETA bookkeeping to at most once per second.
        if (time.time() - self.last_eta) < 1.0:
            return
        self.last_eta = time.time()
        self.avg = self.avg[-6:] + [-(self.start - time.time()) / (self.pos)]
        self.eta_known = self.length_known
    def update(self, n_steps):
        self.make_step(n_steps)
        self.render_progress()
    def finish(self):
        self.eta_known = 0
        self.current_item = None
        self.finished = True
    def next(self):
        if self.is_hidden:
            return next(self.iter)
        try:
            rv = next(self.iter)
            self.current_item = rv
        except StopIteration:
            # Draw the final (100%) state before propagating.
            self.finish()
            self.render_progress()
            raise StopIteration()
        else:
            self.update(1)
            return rv
    # Under Python 3 the iterator hook is __next__; drop the 2.x name there.
    if not PY2:
        __next__ = next
        del next
def pager(text, color=None):
    """Decide what method to use for paging through text."""
    stdout = _default_text_stdout()
    # Not attached to a terminal on either end: just dump the text.
    if not isatty(sys.stdin) or not isatty(stdout):
        return _nullpager(stdout, text, color)
    pager_cmd = (os.environ.get('PAGER', None) or '').strip()
    if pager_cmd:
        # $PAGER wins; Windows cannot pipe reliably, so use a temp file there.
        if WIN:
            return _tempfilepager(text, pager_cmd, color)
        return _pipepager(text, pager_cmd, color)
    if os.environ.get('TERM') in ('dumb', 'emacs'):
        return _nullpager(stdout, text, color)
    if WIN or sys.platform.startswith('os2'):
        return _tempfilepager(text, 'more <', color)
    # Probe for less(1); the subshell exits 0 only if the command exists.
    if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
        return _pipepager(text, 'less', color)
    import tempfile
    fd, filename = tempfile.mkstemp()
    os.close(fd)
    try:
        # Probe for more(1) against an empty temp file before committing.
        if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
            return _pipepager(text, 'more', color)
        return _nullpager(stdout, text, color)
    finally:
        os.unlink(filename)
def _pipepager(text, cmd, color):
    """Page through text by feeding it to another program. Invoking a
    pager through this might support colors.
    """
    import subprocess
    env = dict(os.environ)
    # If we're piping to less we might support colors, provided either
    # $LESS or the command line enables raw control characters (-r/-R).
    cmd_detail = cmd.rsplit('/', 1)[-1].split()
    if color is None and cmd_detail[0] == 'less':
        less_flags = os.environ.get('LESS', '') + ' '.join(cmd_detail[1:])
        if not less_flags:
            env['LESS'] = '-R'
            color = True
        elif 'r' in less_flags or 'R' in less_flags:
            color = True
    if not color:
        text = strip_ansi(text)
    c = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
                         env=env)
    encoding = get_best_encoding(c.stdin)
    try:
        c.stdin.write(text.encode(encoding, 'replace'))
        c.stdin.close()
    except (IOError, KeyboardInterrupt):
        # The pager quit early or the user interrupted the write; either
        # way we just stop feeding it.
        pass
    # Less doesn't respect ^C, but catches it for its own UI purposes (aborting
    # search or other commands inside less).
    #
    # That means when the user hits ^C, the parent process (click) terminates,
    # but less is still alive, paging the output and messing up the terminal.
    #
    # If the user wants to make the pager exit on ^C, they should set
    # `LESS='-K'`. It's not our decision to make.
    while True:
        try:
            c.wait()
        except KeyboardInterrupt:
            pass
        else:
            break
def _tempfilepager(text, cmd, color):
    """Page through text by invoking a program on a temporary file.

    Used on platforms (Windows) where piping into the pager is unreliable.
    """
    import tempfile
    # Use mkstemp() rather than the insecure mktemp(): mktemp() only returns
    # a name, leaving a window in which an attacker can create (or symlink)
    # the path before we open it.  mkstemp() atomically creates the file.
    fd, filename = tempfile.mkstemp()
    os.close(fd)
    if not color:
        text = strip_ansi(text)
    encoding = get_best_encoding(sys.stdout)
    with open_stream(filename, 'wb')[0] as f:
        f.write(text.encode(encoding))
    try:
        os.system(cmd + ' "' + filename + '"')
    finally:
        os.unlink(filename)
def _nullpager(stream, text, color):
"""Simply print unformatted text. This is the ultimate fallback."""
if not color:
text = strip_ansi(text)
stream.write(text)
class Editor(object):
    """Launches an external text editor on a file or on in-memory text."""
    def __init__(self, editor=None, env=None, require_save=True,
                 extension='.txt'):
        self.editor = editor
        self.env = env
        # When True, edit() returns None if the user quit without saving.
        self.require_save = require_save
        self.extension = extension
    def get_editor(self):
        # Explicit editor wins, then $VISUAL/$EDITOR, then platform fallbacks.
        if self.editor is not None:
            return self.editor
        for key in 'VISUAL', 'EDITOR':
            rv = os.environ.get(key)
            if rv:
                return rv
        if WIN:
            return 'notepad'
        for editor in 'vim', 'nano':
            if os.system('which %s >/dev/null 2>&1' % editor) == 0:
                return editor
        return 'vi'
    def edit_file(self, filename):
        import subprocess
        editor = self.get_editor()
        if self.env:
            # Overlay the caller-provided variables on the inherited env.
            environ = os.environ.copy()
            environ.update(self.env)
        else:
            environ = None
        try:
            c = subprocess.Popen('%s "%s"' % (editor, filename),
                                 env=environ, shell=True)
            exit_code = c.wait()
            if exit_code != 0:
                raise ClickException('%s: Editing failed!' % editor)
        except OSError as e:
            raise ClickException('%s: Editing failed: %s' % (editor, e))
    def edit(self, text):
        import tempfile
        text = text or ''
        # Ensure a trailing newline so editors behave predictably.
        if text and not text.endswith('\n'):
            text += '\n'
        fd, name = tempfile.mkstemp(prefix='editor-', suffix=self.extension)
        try:
            if WIN:
                # BOM + CRLF so Notepad and friends render the file correctly.
                encoding = 'utf-8-sig'
                text = text.replace('\n', '\r\n')
            else:
                encoding = 'utf-8'
            text = text.encode(encoding)
            f = os.fdopen(fd, 'wb')
            f.write(text)
            f.close()
            timestamp = os.path.getmtime(name)
            self.edit_file(name)
            # Unchanged mtime means the user never saved: report "no change".
            if self.require_save \
               and os.path.getmtime(name) == timestamp:
                return None
            f = open(name, 'rb')
            try:
                rv = f.read()
            finally:
                f.close()
            return rv.decode('utf-8-sig').replace('\r\n', '\n')
        finally:
            os.unlink(name)
def open_url(url, wait=False, locate=False):
    """Open *url* with the platform's default handler.

    With *locate* set, open the file manager pointing at the file instead.
    Returns the launcher's exit code where one is available.
    """
    import subprocess

    def _unquote_file(url):
        # Strip a file:// prefix and percent-decode the path.  The previous
        # version imported ``urllib`` in both try/except branches and called
        # ``urllib.unquote``, which does not exist on Python 3; import the
        # unquote function itself so both major versions work.
        try:
            from urllib.parse import unquote  # Python 3
        except ImportError:
            from urllib import unquote  # Python 2
        if url.startswith('file://'):
            url = unquote(url[7:])
        return url

    if sys.platform == 'darwin':
        args = ['open']
        if wait:
            args.append('-W')
        if locate:
            args.append('-R')
        args.append(_unquote_file(url))
        null = open('/dev/null', 'w')
        try:
            return subprocess.Popen(args, stderr=null).wait()
        finally:
            null.close()
    elif WIN:
        if locate:
            url = _unquote_file(url)
            args = 'explorer /select,"%s"' % _unquote_file(
                url.replace('"', ''))
        else:
            args = 'start %s "" "%s"' % (
                wait and '/WAIT' or '', url.replace('"', ''))
        return os.system(args)
    try:
        if locate:
            url = os.path.dirname(_unquote_file(url)) or '.'
        else:
            url = _unquote_file(url)
        c = subprocess.Popen(['xdg-open', url])
        if wait:
            return c.wait()
        return 0
    except OSError:
        # xdg-open is missing; for plain web URLs fall back to the stdlib
        # browser launcher, otherwise report failure.
        if url.startswith(('http://', 'https://')) and not locate and not wait:
            import webbrowser
            webbrowser.open(url)
            return 0
        return 1
def _translate_ch_to_exc(ch):
if ch == '\x03':
raise KeyboardInterrupt()
if ch == '\x04':
raise EOFError()
if WIN:
    import msvcrt

    def getchar(echo):
        """Read one keypress via the Windows console API."""
        rv = msvcrt.getch()
        if echo:
            msvcrt.putchar(rv)
        _translate_ch_to_exc(rv)
        if PY2:
            # Decode the console byte to text on Python 2.
            enc = getattr(sys.stdin, 'encoding', None)
            if enc is not None:
                rv = rv.decode(enc, 'replace')
            else:
                rv = rv.decode('cp1252', 'replace')
        return rv
else:
    import tty
    import termios

    def getchar(echo):
        """Read one keypress from the controlling terminal in raw mode."""
        if not isatty(sys.stdin):
            # stdin is redirected; fall back to the controlling terminal.
            f = open('/dev/tty')
            fd = f.fileno()
        else:
            fd = sys.stdin.fileno()
            f = None
        # Pre-bind ch: previously, if tcgetattr() raised termios.error the
        # swallowed exception left ch unbound and the function crashed with
        # an UnboundLocalError below instead of returning.
        ch = b''
        try:
            old_settings = termios.tcgetattr(fd)
            try:
                tty.setraw(fd)
                ch = os.read(fd, 32)
                if echo and isatty(sys.stdout):
                    sys.stdout.write(ch)
            finally:
                # Always restore the terminal mode.
                termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
                sys.stdout.flush()
                if f is not None:
                    f.close()
        except termios.error:
            pass
        _translate_ch_to_exc(ch)
        return ch.decode(get_best_encoding(sys.stdin), 'replace')
| mit |
petr-tichy/thefuck | setup.py | 1 | 1212 | #!/usr/bin/env python
from setuptools import setup, find_packages
import sys
# Refuse unsupported interpreters: anything below 2.7, or 3.x below 3.3.
if sys.version_info < (2, 7):
    print('thefuck requires Python version 2.7 or later' +
          ' ({}.{} detected).'.format(*sys.version_info[:2]))
    sys.exit(-1)
elif (3, 0) < sys.version_info < (3, 3):
    print('thefuck requires Python version 3.3 or later' +
          ' ({}.{} detected).'.format(*sys.version_info[:2]))
    sys.exit(-1)
VERSION = '1.49.1'
install_requires = ['psutil', 'colorama', 'six']
# pathlib backport is only needed on interpreters older than 3.4.
extras_require = {':python_version<"3.4"': ['pathlib']}
setup(name='thefuck',
      version=VERSION,
      description="Magnificent app which corrects your previous console command",
      author='Vladimir Iakovlev',
      author_email='nvbn.rm@gmail.com',
      url='https://github.com/nvbn/thefuck',
      license='MIT',
      packages=find_packages(exclude=['ez_setup', 'examples',
                                      'tests', 'release']),
      include_package_data=True,
      zip_safe=False,
      install_requires=install_requires,
      extras_require=extras_require,
      entry_points={'console_scripts': [
          'thefuck = thefuck.main:main',
          'thefuck-alias = thefuck.shells:app_alias']})
| mit |
shakamunyi/tensorflow | tensorflow/contrib/learn/python/learn/tests/test_regression.py | 8 | 1610 | # Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
class RegressionTest(tf.test.TestCase):
  """Smoke test: TensorFlowLinearRegressor recovers known linear weights."""
  def testLinearRegression(self):
    # Fixed seed keeps the synthetic data (and thus the test) deterministic.
    rng = np.random.RandomState(67)
    N = 1000
    n_weights = 10
    self.bias = 2
    self.X = rng.uniform(-1, 1, (N, n_weights))
    self.weights = 10 * rng.randn(n_weights)
    self.y = np.dot(self.X, self.weights)
    # Add small observation noise plus a noisy intercept around self.bias.
    self.y += rng.randn(len(self.X)) * 0.05 + rng.normal(self.bias, 0.01)
    regressor = learn.TensorFlowLinearRegressor(optimizer="SGD")
    regressor.fit(self.X, self.y)
    # Have to flatten weights since they come in (X, 1) shape
    self.assertAllClose(self.weights, regressor.weights_.flatten(), rtol=0.01)
    assert abs(self.bias - regressor.bias_) < 0.1
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
jhayworth/config | .emacs.d/elpy/rpc-venv/local/lib/python2.7/site-packages/wheel/macosx_libfile.py | 6 | 11858 | """
This module contains functions that analyse dynamic library
headers to extract system information.
Currently this is implemented only for MacOSX.
Library file on macosx system starts with Mach-O or Fat field.
This can be distinguish by first 32 bites and it is called magic number.
Proper value of magic number is with suffix _MAGIC. Suffix _CIGAM means
reversed bytes order.
Both fields can occur in two types: 32 and 64 bytes.
A FAT field informs that this library contains several versions of the
library (typically for different architecture types).  It contains
information about where each Mach-O header starts.
Each section started with Mach-O header contains one library
(So if file starts with this field it contains only one version).
After filed Mach-O there are section fields.
Each of them starts with two fields:
cmd - magic number for this command
cmdsize - total size occupied by this section information.
In this case only sections LC_VERSION_MIN_MACOSX (for macosx 10.13 and earlier)
and LC_BUILD_VERSION (for macosx 10.14 and newer) are interesting,
because they contain information about the minimal system version.
Important remarks:
- For fat files this implementation looks for the maximum version number.
It does not check whether a slice is 32- or 64-bit and does not compare it
with the currently built package, so it is possible to falsely report a
higher version than needed.
- All structure signatures are taken from macOS header files.
- The binary format should be more stable than `otool` output,
and if Apple introduces changes, both implementations would need updating.
"""
import ctypes
import sys
"""here the needed const and struct from mach-o header files"""
# Magic numbers identifying Mach-O and fat binaries (from <mach-o/loader.h>
# and <mach-o/fat.h>).  The *_CIGAM forms are the *_MAGIC values with the
# byte order reversed.
FAT_MAGIC = 0xcafebabe
FAT_CIGAM = 0xbebafeca
FAT_MAGIC_64 = 0xcafebabf
FAT_CIGAM_64 = 0xbfbafeca
MH_MAGIC = 0xfeedface
MH_CIGAM = 0xcefaedfe
MH_MAGIC_64 = 0xfeedfacf
MH_CIGAM_64 = 0xcffaedfe
# Load-command identifiers whose payload carries the minimum OS version.
LC_VERSION_MIN_MACOSX = 0x24
LC_BUILD_VERSION = 0x32
mach_header_fields = [
("magic", ctypes.c_uint32), ("cputype", ctypes.c_int),
("cpusubtype", ctypes.c_int), ("filetype", ctypes.c_uint32),
("ncmds", ctypes.c_uint32), ("sizeofcmds", ctypes.c_uint32),
("flags", ctypes.c_uint32)
]
"""
struct mach_header {
uint32_t magic; /* mach magic number identifier */
cpu_type_t cputype; /* cpu specifier */
cpu_subtype_t cpusubtype; /* machine specifier */
uint32_t filetype; /* type of file */
uint32_t ncmds; /* number of load commands */
uint32_t sizeofcmds; /* the size of all the load commands */
uint32_t flags; /* flags */
};
typedef integer_t cpu_type_t;
typedef integer_t cpu_subtype_t;
"""
mach_header_fields_64 = mach_header_fields + [("reserved", ctypes.c_uint32)]
"""
struct mach_header_64 {
uint32_t magic; /* mach magic number identifier */
cpu_type_t cputype; /* cpu specifier */
cpu_subtype_t cpusubtype; /* machine specifier */
uint32_t filetype; /* type of file */
uint32_t ncmds; /* number of load commands */
uint32_t sizeofcmds; /* the size of all the load commands */
uint32_t flags; /* flags */
uint32_t reserved; /* reserved */
};
"""
fat_header_fields = [("magic", ctypes.c_uint32), ("nfat_arch", ctypes.c_uint32)]
"""
struct fat_header {
uint32_t magic; /* FAT_MAGIC or FAT_MAGIC_64 */
uint32_t nfat_arch; /* number of structs that follow */
};
"""
fat_arch_fields = [
("cputype", ctypes.c_int), ("cpusubtype", ctypes.c_int),
("offset", ctypes.c_uint32), ("size", ctypes.c_uint32),
("align", ctypes.c_uint32)
]
"""
struct fat_arch {
cpu_type_t cputype; /* cpu specifier (int) */
cpu_subtype_t cpusubtype; /* machine specifier (int) */
uint32_t offset; /* file offset to this object file */
uint32_t size; /* size of this object file */
uint32_t align; /* alignment as a power of 2 */
};
"""
fat_arch_64_fields = [
("cputype", ctypes.c_int), ("cpusubtype", ctypes.c_int),
("offset", ctypes.c_uint64), ("size", ctypes.c_uint64),
("align", ctypes.c_uint32), ("reserved", ctypes.c_uint32)
]
"""
struct fat_arch_64 {
cpu_type_t cputype; /* cpu specifier (int) */
cpu_subtype_t cpusubtype; /* machine specifier (int) */
uint64_t offset; /* file offset to this object file */
uint64_t size; /* size of this object file */
uint32_t align; /* alignment as a power of 2 */
uint32_t reserved; /* reserved */
};
"""
segment_base_fields = [("cmd", ctypes.c_uint32), ("cmdsize", ctypes.c_uint32)]
"""base for reading segment info"""
segment_command_fields = [
("cmd", ctypes.c_uint32), ("cmdsize", ctypes.c_uint32),
("segname", ctypes.c_char * 16), ("vmaddr", ctypes.c_uint32),
("vmsize", ctypes.c_uint32), ("fileoff", ctypes.c_uint32),
("filesize", ctypes.c_uint32), ("maxprot", ctypes.c_int),
("initprot", ctypes.c_int), ("nsects", ctypes.c_uint32),
("flags", ctypes.c_uint32),
]
"""
struct segment_command { /* for 32-bit architectures */
uint32_t cmd; /* LC_SEGMENT */
uint32_t cmdsize; /* includes sizeof section structs */
char segname[16]; /* segment name */
uint32_t vmaddr; /* memory address of this segment */
uint32_t vmsize; /* memory size of this segment */
uint32_t fileoff; /* file offset of this segment */
uint32_t filesize; /* amount to map from the file */
vm_prot_t maxprot; /* maximum VM protection */
vm_prot_t initprot; /* initial VM protection */
uint32_t nsects; /* number of sections in segment */
uint32_t flags; /* flags */
};
typedef int vm_prot_t;
"""
segment_command_fields_64 = [
("cmd", ctypes.c_uint32), ("cmdsize", ctypes.c_uint32),
("segname", ctypes.c_char * 16), ("vmaddr", ctypes.c_uint64),
("vmsize", ctypes.c_uint64), ("fileoff", ctypes.c_uint64),
("filesize", ctypes.c_uint64), ("maxprot", ctypes.c_int),
("initprot", ctypes.c_int), ("nsects", ctypes.c_uint32),
("flags", ctypes.c_uint32),
]
"""
struct segment_command_64 { /* for 64-bit architectures */
uint32_t cmd; /* LC_SEGMENT_64 */
uint32_t cmdsize; /* includes sizeof section_64 structs */
char segname[16]; /* segment name */
uint64_t vmaddr; /* memory address of this segment */
uint64_t vmsize; /* memory size of this segment */
uint64_t fileoff; /* file offset of this segment */
uint64_t filesize; /* amount to map from the file */
vm_prot_t maxprot; /* maximum VM protection */
vm_prot_t initprot; /* initial VM protection */
uint32_t nsects; /* number of sections in segment */
uint32_t flags; /* flags */
};
"""
version_min_command_fields = segment_base_fields + \
[("version", ctypes.c_uint32), ("sdk", ctypes.c_uint32)]
"""
struct version_min_command {
uint32_t cmd; /* LC_VERSION_MIN_MACOSX or
LC_VERSION_MIN_IPHONEOS or
LC_VERSION_MIN_WATCHOS or
LC_VERSION_MIN_TVOS */
uint32_t cmdsize; /* sizeof(struct min_version_command) */
uint32_t version; /* X.Y.Z is encoded in nibbles xxxx.yy.zz */
uint32_t sdk; /* X.Y.Z is encoded in nibbles xxxx.yy.zz */
};
"""
build_version_command_fields = segment_base_fields + \
[("platform", ctypes.c_uint32), ("minos", ctypes.c_uint32),
("sdk", ctypes.c_uint32), ("ntools", ctypes.c_uint32)]
"""
struct build_version_command {
uint32_t cmd; /* LC_BUILD_VERSION */
uint32_t cmdsize; /* sizeof(struct build_version_command) plus */
/* ntools * sizeof(struct build_tool_version) */
uint32_t platform; /* platform */
uint32_t minos; /* X.Y.Z is encoded in nibbles xxxx.yy.zz */
uint32_t sdk; /* X.Y.Z is encoded in nibbles xxxx.yy.zz */
uint32_t ntools; /* number of tool entries following this */
};
"""
def swap32(x):
    """Return *x* with its four bytes in reversed order.

    Used to normalize Mach-O magic numbers whose byte order is the
    opposite of the host's. Only the low 32 bits of *x* are considered.
    """
    b0 = x & 0xFF
    b1 = (x >> 8) & 0xFF
    b2 = (x >> 16) & 0xFF
    b3 = (x >> 24) & 0xFF
    return (b0 << 24) | (b1 << 16) | (b2 << 8) | b3
def get_base_class_and_magic_number(lib_file, seek=None):
    """Peek the 4-byte magic number at *seek* (default: current position).

    Returns ``(BaseClass, magic_number)`` where ``BaseClass`` is the
    ctypes structure base whose byte order matches the file, and
    ``magic_number`` is normalized to host order. The file position is
    restored to *seek* before returning.
    """
    if seek is None:
        seek = lib_file.tell()
    else:
        lib_file.seek(seek)
    raw = lib_file.read(ctypes.sizeof(ctypes.c_uint32))
    magic_number = ctypes.c_uint32.from_buffer_copy(raw).value
    # A byte-swapped ("CIGAM") magic means the library's byte order is the
    # opposite of the host's: pick the non-native structure base and
    # normalize the magic so callers only ever see the "MAGIC" forms.
    if magic_number in [FAT_CIGAM, FAT_CIGAM_64, MH_CIGAM, MH_CIGAM_64]:
        magic_number = swap32(magic_number)
        if sys.byteorder == "little":
            BaseClass = ctypes.BigEndianStructure
        else:
            BaseClass = ctypes.LittleEndianStructure
    else:
        BaseClass = ctypes.Structure
    lib_file.seek(seek)
    return BaseClass, magic_number
def read_data(struct_class, lib_file):
    """Read exactly ``sizeof(struct_class)`` bytes from *lib_file* and
    return them parsed as a *struct_class* instance."""
    raw = lib_file.read(ctypes.sizeof(struct_class))
    return struct_class.from_buffer_copy(raw)
def extract_macosx_min_system_version(path_to_lib):
    """Return the minimal macOS version required by the Mach-O binary at
    *path_to_lib* as an ``(x, y, z)`` tuple, or ``None``/implicit None if
    it cannot be determined.

    Fat (multi-architecture) binaries report the maximum of the versions
    required by their member architectures.
    """
    with open(path_to_lib, "rb") as lib_file:
        BaseClass, magic_number = get_base_class_and_magic_number(lib_file, 0)
        if magic_number not in [FAT_MAGIC, FAT_MAGIC_64, MH_MAGIC, MH_MAGIC_64]:
            return

        # BUG FIX: this previously tested FAT_CIGAM_64, which can never
        # match because get_base_class_and_magic_number already normalizes
        # byte-swapped magics; FAT_MAGIC_64 binaries therefore fell through
        # to the thin-binary branch and were misparsed.
        if magic_number in [FAT_MAGIC, FAT_MAGIC_64]:

            class FatHeader(BaseClass):
                _fields_ = fat_header_fields

            fat_header = read_data(FatHeader, lib_file)
            if magic_number == FAT_MAGIC:

                class FatArch(BaseClass):
                    _fields_ = fat_arch_fields

            else:

                class FatArch(BaseClass):
                    _fields_ = fat_arch_64_fields

            fat_arch_list = [read_data(FatArch, lib_file)
                             for _ in range(fat_header.nfat_arch)]

            versions_list = []
            for el in fat_arch_list:
                try:
                    version = read_mach_header(lib_file, el.offset)
                    if version is not None:
                        versions_list.append(version)
                except ValueError:
                    # Skip architectures whose slice cannot be parsed.
                    pass

            return max(versions_list) if versions_list else None
        else:
            try:
                return read_mach_header(lib_file, 0)
            except ValueError:
                # Error while reading the library file: report "unknown"
                # rather than propagating the parse error.
                return None
def read_mach_header(lib_file, seek=None):
    """
    Parse a Mach-O header and extract information about the minimal
    system version, as an (x, y, z) tuple, or None if no version load
    command is present.
    :param lib_file: reference to opened library file with pointer
    :param seek: optional absolute offset of the Mach-O header within
        the file (used for slices of fat binaries)
    """
    if seek is not None:
        lib_file.seek(seek)
    base_class, magic_number = get_base_class_and_magic_number(lib_file)
    arch = "32" if magic_number == MH_MAGIC else "64"

    class SegmentBase(base_class):
        _fields_ = segment_base_fields

    if arch == "32":

        class MachHeader(base_class):
            _fields_ = mach_header_fields

    else:

        class MachHeader(base_class):
            _fields_ = mach_header_fields_64

    mach_header = read_data(MachHeader, lib_file)
    # Walk the load commands; each starts with the common (cmd, cmdsize)
    # prefix, which tells us its type and how far to skip to the next one.
    for _i in range(mach_header.ncmds):
        pos = lib_file.tell()
        segment_base = read_data(SegmentBase, lib_file)
        # Rewind so the full command can be re-read with its proper struct.
        lib_file.seek(pos)
        if segment_base.cmd == LC_VERSION_MIN_MACOSX:

            class VersionMinCommand(base_class):
                _fields_ = version_min_command_fields

            version_info = read_data(VersionMinCommand, lib_file)
            return parse_version(version_info.version)
        elif segment_base.cmd == LC_BUILD_VERSION:

            class VersionBuild(base_class):
                _fields_ = build_version_command_fields

            version_info = read_data(VersionBuild, lib_file)
            # LC_BUILD_VERSION carries the minimum OS in the ``minos`` field.
            return parse_version(version_info.minos)
        else:
            # Not a version command: skip to the next load command.
            lib_file.seek(pos + segment_base.cmdsize)
            continue
def parse_version(version):
    """Decode a Mach-O nibble-packed version number.

    The 32-bit value encodes X.Y.Z as ``xxxx.yy.zz``; returns the tuple
    ``(x, y, z)``.
    """
    major = (version >> 16) & 0xFFFF
    minor = (version >> 8) & 0xFF
    patch = version & 0xFF
    return major, minor, patch
| gpl-3.0 |
turbomanage/training-data-analyst | courses/machine_learning/deepdive2/structured/solutions/serving/application/lib/itsdangerous/timed.py | 11 | 5635 | import time
from datetime import datetime
from ._compat import text_type
from .encoding import base64_decode
from .encoding import base64_encode
from .encoding import bytes_to_int
from .encoding import int_to_bytes
from .encoding import want_bytes
from .exc import BadSignature
from .exc import BadTimeSignature
from .exc import SignatureExpired
from .serializer import Serializer
from .signer import Signer
class TimestampSigner(Signer):
    """Works like the regular :class:`.Signer` but also records the time
    of the signing and can be used to expire signatures. The
    :meth:`unsign` method can raise :exc:`.SignatureExpired` if the
    unsigning failed because the signature is expired.
    """

    def get_timestamp(self):
        """Returns the current timestamp. The function must return an
        integer.
        """
        return int(time.time())

    def timestamp_to_datetime(self, ts):
        """Used to convert the timestamp from :meth:`get_timestamp` into
        a datetime object.
        """
        return datetime.utcfromtimestamp(ts)

    def sign(self, value):
        """Signs the given string and also attaches time information."""
        value = want_bytes(value)
        timestamp = base64_encode(int_to_bytes(self.get_timestamp()))
        sep = want_bytes(self.sep)
        # Payload layout: <value><sep><timestamp><sep><signature>, where the
        # signature covers both the value and the timestamp.
        value = value + sep + timestamp
        return value + sep + self.get_signature(value)

    def unsign(self, value, max_age=None, return_timestamp=False):
        """Works like the regular :meth:`.Signer.unsign` but can also
        validate the time. See the base docstring of the class for
        the general behavior. If ``return_timestamp`` is ``True`` the
        timestamp of the signature will be returned as a naive
        :class:`datetime.datetime` object in UTC.

        :raises BadTimeSignature: if the signature is invalid or the
            timestamp is missing/malformed.
        :raises SignatureExpired: if ``max_age`` (seconds) is given and
            the signature is older than that.
        """
        try:
            result = Signer.unsign(self, value)
            sig_error = None
        except BadSignature as e:
            # Defer raising: we still want to split out the timestamp so the
            # error can carry the payload and signing date.
            sig_error = e
            result = e.payload or b""
        sep = want_bytes(self.sep)
        # If there is no timestamp in the result there is something
        # seriously wrong. In case there was a signature error, we raise
        # that one directly, otherwise we have a weird situation in
        # which we shouldn't have come except someone uses a time-based
        # serializer on non-timestamp data, so catch that.
        if sep not in result:
            if sig_error:
                raise sig_error
            raise BadTimeSignature("timestamp missing", payload=result)
        value, timestamp = result.rsplit(sep, 1)
        try:
            timestamp = bytes_to_int(base64_decode(timestamp))
        except Exception:
            # Undecodable timestamp; reported below once signature errors
            # have been dealt with.
            timestamp = None
        # Signature is *not* okay. Raise a proper error now that we have
        # split the value and the timestamp.
        if sig_error is not None:
            raise BadTimeSignature(
                text_type(sig_error), payload=value, date_signed=timestamp
            )
        # Signature was okay but the timestamp is actually not there or
        # malformed. Should not happen, but we handle it anyway.
        if timestamp is None:
            raise BadTimeSignature("Malformed timestamp", payload=value)
        # Check timestamp is not older than max_age
        if max_age is not None:
            age = self.get_timestamp() - timestamp
            if age > max_age:
                raise SignatureExpired(
                    "Signature age %s > %s seconds" % (age, max_age),
                    payload=value,
                    date_signed=self.timestamp_to_datetime(timestamp),
                )
        if return_timestamp:
            return value, self.timestamp_to_datetime(timestamp)
        return value

    def validate(self, signed_value, max_age=None):
        """Only validates the given signed value. Returns ``True`` if
        the signature exists and is valid."""
        try:
            self.unsign(signed_value, max_age=max_age)
            return True
        except BadSignature:
            return False
class TimedSerializer(Serializer):
    """Uses :class:`TimestampSigner` instead of the default
    :class:`.Signer`.
    """

    # Signer class used for sign/unsign operations.
    default_signer = TimestampSigner

    def loads(self, s, max_age=None, return_timestamp=False, salt=None):
        """Reverse of :meth:`dumps`, raises :exc:`.BadSignature` if the
        signature validation fails. If a ``max_age`` is provided it will
        ensure the signature is not older than that time in seconds. In
        case the signature is outdated, :exc:`.SignatureExpired` is
        raised. All arguments are forwarded to the signer's
        :meth:`~TimestampSigner.unsign` method.
        """
        s = want_bytes(s)
        last_exception = None
        # Try each configured signer (supports key rotation); first one
        # that validates wins.
        for signer in self.iter_unsigners(salt):
            try:
                base64d, timestamp = signer.unsign(s, max_age, return_timestamp=True)
                payload = self.load_payload(base64d)
                if return_timestamp:
                    return payload, timestamp
                return payload
            # If we get a signature expired it means we could read the
            # signature but it's invalid. In that case we do not want to
            # try the next signer.
            except SignatureExpired:
                raise
            except BadSignature as err:
                last_exception = err
        # No signer accepted the value; re-raise the most recent failure.
        raise last_exception

    def loads_unsafe(self, s, max_age=None, salt=None):
        # Best-effort variant inherited from Serializer: returns
        # (signature_ok, payload) instead of raising.
        load_kwargs = {"max_age": max_age}
        load_payload_kwargs = {}
        return self._loads_unsafe_impl(s, salt, load_kwargs, load_payload_kwargs)
| apache-2.0 |
eeshangarg/oh-mainline | vendor/packages/twisted/twisted/persisted/journal/base.py | 19 | 6454 | # -*- test-case-name: twisted.test.test_journal -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
#
"""Basic classes and interfaces for journal."""
from __future__ import nested_scopes
# system imports
import os, time
try:
import cPickle as pickle
except ImportError:
import pickle
# twisted imports
from zope.interface import implements, Interface
class Journal:
    """All commands to the system get routed through here.

    Subclasses should implement the actual snapshotting capability.
    """

    def __init__(self, log, journaledService):
        # log: an ICommandLog provider; journaledService: the object
        # commands are executed against.
        self.log = log
        self.journaledService = journaledService
        self.latestIndex = self.log.getCurrentIndex()

    def updateFromLog(self):
        """Run all commands from log that haven't been run yet.

        This method should be run on startup to ensure the snapshot
        is up-to-date.
        """
        snapshotIndex = self.getLastSnapshot()
        if snapshotIndex < self.latestIndex:
            # Replay every command logged after the snapshot, in order.
            for cmdtime, command in self.log.getCommandsSince(snapshotIndex + 1):
                command.execute(self.journaledService, cmdtime)

    def executeCommand(self, command):
        """Log and execute a command.

        @return: Deferred firing with the command's result once it has
            been both logged and executed.
        """
        runTime = time.time()
        d = self.log.logCommand(command, runTime)
        # Execution is deferred until the command is durably logged.
        d.addCallback(self._reallyExecute, command, runTime)
        return d

    def _reallyExecute(self, index, command, runTime):
        """Callback called when logging command is done."""
        result = command.execute(self.journaledService, runTime)
        self.latestIndex = index
        return result

    def getLastSnapshot(self):
        """Return command index of the last snapshot taken."""
        raise NotImplementedError

    def sync(self, *args, **kwargs):
        """Save journal to disk, returns Deferred of finish status.

        Subclasses may choose whatever signature is appropriate, or may
        not implement this at all.
        """
        raise NotImplementedError
class MemoryJournal(Journal):
    """Prevayler-like journal that dumps from memory to disk.

    The snapshot file stores a pickled ``(lastSync, obj)`` tuple.
    """

    def __init__(self, log, journaledService, path, loadedCallback):
        self.path = path
        if os.path.exists(path):
            try:
                # Close the snapshot file deterministically (the previous
                # code leaked the file object returned by open()).
                f = open(path, "rb")
                try:
                    self.lastSync, obj = pickle.load(f)
                finally:
                    f.close()
            except (IOError, OSError, pickle.UnpicklingError):
                # Unreadable/corrupt snapshot: start from scratch.
                self.lastSync, obj = 0, None
            loadedCallback(obj)
        else:
            self.lastSync = 0
            loadedCallback(None)
        Journal.__init__(self, log, journaledService)

    def getLastSnapshot(self):
        """Return command index of the last snapshot taken."""
        return self.lastSync

    def sync(self, obj):
        # make this more reliable at some point
        f = open(self.path, "wb")
        try:
            pickle.dump((self.latestIndex, obj), f, 1)
        finally:
            f.close()
        self.lastSync = self.latestIndex
class ICommand(Interface):
    """A serializable command which interacts with a journaled service."""

    def execute(journaledService, runTime):
        """Run the command and return result.

        @param journaledService: the service the command operates on.
        @param runTime: the time (seconds since epoch) the command was
            originally logged.
        """
class ICommandLog(Interface):
    """Interface for command log."""

    def logCommand(command, runTime):
        """Add a command and its run time to the log.

        @return: Deferred of command index.
        """

    def getCurrentIndex():
        """Return index of last command that was logged."""

    def getCommandsSince(index):
        """Return commands whose index >= the given one.

        @return: list of (time, command) tuples, sorted with ascending times.
        """
class LoadingService:
    """Base class for journalled service used with Wrappables."""

    def loadObject(self, objType, objId):
        """Return object of specified type and id.

        Subclasses must override this lookup.
        """
        raise NotImplementedError
class Wrappable:
    """Base class for objects used with LoadingService."""

    objectType = None  # override in base class

    def getUid(self):
        """Return uid for loading with LoadingService.loadObject"""
        raise NotImplementedError
class WrapperCommand:
    """ICommand that invokes a named method on a Wrappable.

    The target object is identified by (objectType, uid) so the command
    can be pickled and later re-resolved through a LoadingService.
    """

    implements(ICommand)

    def __init__(self, methodName, obj, args=(), kwargs=None):
        # Avoid the shared mutable-default-dict pitfall: build a fresh
        # dict per instance instead of defaulting to ``kwargs={}``.
        if kwargs is None:
            kwargs = {}
        self.obj = obj
        self.objId = obj.getUid()
        self.objType = obj.objectType
        self.methodName = methodName
        self.args = args
        self.kwargs = kwargs

    def execute(self, svc, commandTime):
        # After unpickling, ``obj`` is absent (see __getstate__) and must
        # be re-resolved through the service.
        if not hasattr(self, "obj"):
            obj = svc.loadObject(self.objType, self.objId)
        else:
            obj = self.obj
        return getattr(obj, self.methodName)(*self.args, **self.kwargs)

    def __getstate__(self):
        # Drop the live object reference when pickling; pop() tolerates a
        # command that was itself created from a pickled state.
        d = self.__dict__.copy()
        d.pop("obj", None)
        return d
def command(methodName, cmdClass=WrapperCommand):
    """Wrap a method so it gets turned into command automatically.

    For use with Wrappables. Given the name of a private method, returns
    a callable with the same signature as the wrapped function except
    that it expects a journal as its first argument (after the object)
    and returns a Deferred::

        class Foo(Wrappable):
            objectType = "foo"
            def getUid(self):
                return self.id
            def _bar(self, x):
                return x + 1

            bar = command('_bar')
    """
    def wrapper(obj, journal, *args, **kwargs):
        cmd = cmdClass(methodName, obj, args, kwargs)
        return journal.executeCommand(cmd)
    return wrapper
class ServiceWrapperCommand:
    # ICommand that invokes a named method directly on the journaled
    # service (no per-object lookup, unlike WrapperCommand).

    implements(ICommand)

    def __init__(self, methodName, args=(), kwargs={}):
        # NOTE(review): mutable default ``kwargs={}`` is shared between
        # calls; harmless here as it is never mutated, but fragile.
        self.methodName = methodName
        self.args = args
        self.kwargs = kwargs

    def execute(self, svc, commandTime):
        # Dispatch the stored method name on the service itself.
        return getattr(svc, self.methodName)(*self.args, **self.kwargs)

    def __repr__(self):
        return "<ServiceWrapperCommand: %s, %s, %s>" % (self.methodName, self.args, self.kwargs)

    def __cmp__(self, other):
        # Python 2 comparison protocol: equal iff the attribute dicts match.
        if hasattr(other, "__dict__"):
            return cmp(self.__dict__, other.__dict__)
        else:
            return 0
def serviceCommand(methodName, cmdClass=ServiceWrapperCommand):
    """Wrap methods into commands for a journalled service.

    The resulting callable will have signature identical to wrapped
    function, except that it expects journal as first argument, and
    returns a Deferred.
    """
    def wrapper(obj, journal, *args, **kwargs):
        cmd = cmdClass(methodName, args, kwargs)
        return journal.executeCommand(cmd)
    return wrapper
| agpl-3.0 |
twilio/calworkshop | twilio/twiml.py | 2 | 14038 | """
Make sure to check out the TwiML overview and tutorial
"""
import xml.etree.ElementTree as ET
class TwimlException(Exception):
    """Raised when a TwiML document is constructed incorrectly (invalid
    attribute values or illegal verb nesting)."""
    pass
class Verb(object):
    """Twilio basic verb object.

    Base class for every TwiML verb: stores the element name (the class
    name), optional text body, nested verbs, and XML attributes built
    from keyword arguments. Serialization is handled by :meth:`toxml`.
    """
    GET = "GET"
    POST = "POST"

    # Names of verb classes that may be nested inside this one; None
    # means nothing may be appended.
    nestables = None

    def __init__(self, **kwargs):
        self.name = self.__class__.__name__
        self.body = None
        self.verbs = []
        self.attrs = {}

        if kwargs.get("waitMethod", "GET") not in ["GET", "POST"]:
            raise TwimlException("Invalid waitMethod parameter, "
                                 "must be 'GET' or 'POST'")

        if kwargs.get("method", "GET") not in ["GET", "POST"]:
            raise TwimlException("Invalid method parameter, "
                                 "must be 'GET' or 'POST'")

        for k, v in kwargs.items():
            # "from" is a Python keyword, so the API accepts "sender".
            if k == "sender":
                k = "from"
            if v is not None:
                self.attrs[k] = v

    def __str__(self):
        return self.toxml()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        return False

    def toxml(self, xml_declaration=True):
        """
        Return the contents of this verb as an XML string

        :param bool xml_declaration: Include the XML declaration. Defaults to
                                     True
        """
        xml = ET.tostring(self.xml())
        # BUG FIX: ET.tostring returns bytes on Python 3, which has no
        # .encode(); normalize to text instead of calling .encode("utf-8").
        if isinstance(xml, bytes):
            xml = xml.decode("utf-8")

        if xml_declaration:
            return u'<?xml version="1.0" encoding="UTF-8"?>' + xml
        else:
            return xml

    def xml(self):
        """Build and return the ElementTree element for this verb."""
        el = ET.Element(self.name)

        # BUG FIX: dict.keys() returns a view on Python 3 and has no
        # .sort(); use sorted() for a deterministic attribute order on
        # both Python versions.
        for a in sorted(self.attrs.keys()):
            value = self.attrs[a]
            if isinstance(value, bool):
                # Serialize booleans as lowercase "true"/"false".
                el.set(a, str(value).lower())
            else:
                el.set(a, str(value))

        if self.body:
            el.text = self.body

        for verb in self.verbs:
            el.append(verb.xml())

        return el

    def append(self, verb):
        """Nest *verb* inside this one; raises TwimlException if the
        nesting is not allowed."""
        if not self.nestables or verb.name not in self.nestables:
            raise TwimlException("%s is not nestable inside %s" %
                                 (verb.name, self.name))
        self.verbs.append(verb)
        return verb
class Response(Verb):
    """Twilio response object.

    Root <Response> document; the factory methods below create a child
    verb, append it, and return it so nested verbs can be configured.
    """
    nestables = [
        'Say',
        'Play',
        'Gather',
        'Record',
        'Dial',
        'Redirect',
        'Pause',
        'Hangup',
        'Reject',
        'Sms',
        'Enqueue',
        'Leave'
    ]

    def __init__(self, **kwargs):
        """Version: Twilio API version e.g. 2008-08-01 """
        super(Response, self).__init__(**kwargs)

    def say(self, text, **kwargs):
        """Return a newly created :class:`Say` verb, nested inside this
        :class:`Response` """
        return self.append(Say(text, **kwargs))

    def play(self, url, **kwargs):
        """Return a newly created :class:`Play` verb, nested inside this
        :class:`Response` """
        return self.append(Play(url, **kwargs))

    def pause(self, **kwargs):
        """Return a newly created :class:`Pause` verb, nested inside this
        :class:`Response` """
        return self.append(Pause(**kwargs))

    def redirect(self, url=None, **kwargs):
        """Return a newly created :class:`Redirect` verb, nested inside this
        :class:`Response` """
        return self.append(Redirect(url, **kwargs))

    def hangup(self, **kwargs):
        """Return a newly created :class:`Hangup` verb, nested inside this
        :class:`Response` """
        return self.append(Hangup(**kwargs))

    def reject(self, reason=None, **kwargs):
        """Return a newly created :class:`Reject` verb, nested inside this
        :class:`Response` """
        return self.append(Reject(reason=reason, **kwargs))

    def gather(self, **kwargs):
        """Return a newly created :class:`Gather` verb, nested inside this
        :class:`Response` """
        return self.append(Gather(**kwargs))

    def dial(self, number=None, **kwargs):
        """Return a newly created :class:`Dial` verb, nested inside this
        :class:`Response` """
        return self.append(Dial(number, **kwargs))

    def enqueue(self, name, **kwargs):
        """Return a newly created :class:`Enqueue` verb, nested inside this
        :class:`Response` """
        return self.append(Enqueue(name, **kwargs))

    def leave(self, **kwargs):
        """Return a newly created :class:`Leave` verb, nested inside this
        :class:`Response` """
        return self.append(Leave(**kwargs))

    def record(self, **kwargs):
        """Return a newly created :class:`Record` verb, nested inside this
        :class:`Response` """
        return self.append(Record(**kwargs))

    def sms(self, msg, **kwargs):
        """Return a newly created :class:`Sms` verb, nested inside this
        :class:`Response` """
        return self.append(Sms(msg, **kwargs))

    # All add* methods are deprecated
    def addSay(self, *args, **kwargs):
        return self.say(*args, **kwargs)

    def addPlay(self, *args, **kwargs):
        return self.play(*args, **kwargs)

    def addPause(self, *args, **kwargs):
        return self.pause(*args, **kwargs)

    def addRedirect(self, *args, **kwargs):
        return self.redirect(*args, **kwargs)

    def addHangup(self, *args, **kwargs):
        return self.hangup(*args, **kwargs)

    def addReject(self, *args, **kwargs):
        return self.reject(*args, **kwargs)

    def addGather(self, *args, **kwargs):
        return self.gather(*args, **kwargs)

    def addDial(self, *args, **kwargs):
        return self.dial(*args, **kwargs)

    def addRecord(self, *args, **kwargs):
        return self.record(*args, **kwargs)

    def addSms(self, *args, **kwargs):
        return self.sms(*args, **kwargs)
class Say(Verb):
    """The :class:`Say` verb converts text to speech that is read back to the
    caller.

    :param voice: allows you to choose a male or female voice to read text
                  back.

    :param language: allows you pick a voice with a specific language's accent
                     and pronunciations. Twilio currently supports languages
                     'en' (English), 'es' (Spanish), 'fr' (French), and 'de'
                     (German), 'en-gb' (English Great Britain").

    :param loop: specifies how many times you'd like the text repeated.
                 Specifying '0' will cause the the :class:`Say` verb to loop
                 until the call is hung up.
    """
    # Convenience constants for the voice and language attributes.
    MAN = 'man'
    WOMAN = 'woman'

    ENGLISH = 'en'
    BRITISH = 'en-gb'
    SPANISH = 'es'
    FRENCH = 'fr'
    GERMAN = 'de'

    def __init__(self, text, **kwargs):
        super(Say, self).__init__(**kwargs)
        # The element's text body is the spoken text.
        self.body = text
class Play(Verb):
    """Play an audio file at a URL

    :param url: point to af audio file. The MIME type on the file must be set
                correctly.

    :param loop: specifies how many times you'd like the text repeated.
                 Specifying '0' will cause the the :class:`Say` verb to loop
                 until the call is hung up. Defaults to 1.
    """
    def __init__(self, url, **kwargs):
        super(Play, self).__init__(**kwargs)
        # The element's text body is the audio URL.
        self.body = url
class Pause(Verb):
    """Pause the call

    :param length: specifies how many seconds Twilio will wait silently before
                   continuing on.
    """
class Redirect(Verb):
    """Redirect call flow to another URL

    :param url: specifies the url which Twilio should query to retrieve new
                TwiML. The default is the current url

    :param method: specifies the HTTP method to use when retrieving the url
    """
    GET = 'GET'
    POST = 'POST'

    def __init__(self, url="", **kwargs):
        super(Redirect, self).__init__(**kwargs)
        # The element's text body is the redirect target URL.
        self.body = url
class Hangup(Verb):
    """Hangup the call
    """
class Reject(Verb):
    """Reject an incoming call without answering it.

    :param reason: rejection reason reported to the caller
        (NOTE(review): original docstring said "Hangup the call" and
        "not sure" — confirm accepted values against the TwiML docs)
    """
class Gather(Verb):
    """Gather digits from the caller's keypad

    :param action: URL to which the digits entered will be sent
    :param method: submit to 'action' url using GET or POST
    :param numDigits: how many digits to gather before returning
    :param timeout: wait for this many seconds before returning
    :param finishOnKey: key that triggers the end of caller input
    """
    GET = 'GET'
    POST = 'POST'

    # Only prompts may be nested while gathering input.
    nestables = ['Say', 'Play', 'Pause']

    def __init__(self, **kwargs):
        super(Gather, self).__init__(**kwargs)

    def say(self, text, **kwargs):
        return self.append(Say(text, **kwargs))

    def play(self, url, **kwargs):
        return self.append(Play(url, **kwargs))

    def pause(self, **kwargs):
        return self.append(Pause(**kwargs))

    # Deprecated aliases, kept for backwards compatibility (see the add*
    # methods on Response).
    def addSay(self, *args, **kwargs):
        return self.say(*args, **kwargs)

    def addPlay(self, *args, **kwargs):
        return self.play(*args, **kwargs)

    def addPause(self, *args, **kwargs):
        return self.pause(*args, **kwargs)
class Number(Verb):
    """Specify phone number in a nested Dial element.

    :param number: phone number to dial
    :param sendDigits: key to press after connecting to the number
    """
    def __init__(self, number, **kwargs):
        super(Number, self).__init__(**kwargs)
        self.body = number
class Client(Verb):
    """Specify a client name to call in a nested Dial element.

    :param name: Client name to connect to
    """
    def __init__(self, name, **kwargs):
        super(Client, self).__init__(**kwargs)
        self.body = name
class Sms(Verb):
    """ Send a Sms Message to a phone number

    :param to: whom to send message to
    :param sender: whom to send message from.
    :param action: url to request after the message is queued
    :param method: submit to 'action' url using GET or POST
    :param statusCallback: url to hit when the message is actually sent
    """
    GET = 'GET'
    POST = 'POST'

    def __init__(self, msg, **kwargs):
        super(Sms, self).__init__(**kwargs)
        # The element's text body is the message text.
        self.body = msg
class Conference(Verb):
    """Specify conference in a nested Dial element.

    :param name: friendly name of conference
    :param bool muted: keep this participant muted
    :param bool beep: play a beep when this participant enters/leaves
    :param bool startConferenceOnEnter: start conf when this participants joins
    :param bool endConferenceOnExit: end conf when this participants leaves
    :param waitUrl: TwiML url that executes before conference starts
    :param waitMethod: HTTP method for waitUrl GET/POST
    """
    GET = 'GET'
    POST = 'POST'

    def __init__(self, name, **kwargs):
        super(Conference, self).__init__(**kwargs)
        self.body = name
class Dial(Verb):
    """Dial another phone number and connect it to this call

    :param action: submit the result of the dial to this URL
    :param method: submit to 'action' url using GET or POST
    :param int timeout: The number of seconds to waits for the called
                        party to answer the call
    :param bool hangupOnStar: Allow the calling party to hang up on the
                              called party by pressing the '*' key
    :param int timeLimit: The maximum duration of the Call in seconds
    :param callerId: The caller ID that will appear to the called party
    :param bool record: Record both legs of a call within this <Dial>
    """
    GET = 'GET'
    POST = 'POST'

    nestables = ['Number', 'Conference', 'Client', 'Queue']

    def __init__(self, number=None, **kwargs):
        super(Dial, self).__init__(**kwargs)
        # A comma-separated number string becomes multiple nested <Number>
        # elements; a single number becomes the element's text body.
        if number and len(number.split(',')) > 1:
            for n in number.split(','):
                self.append(Number(n.strip()))
        else:
            self.body = number

    def client(self, name, **kwargs):
        return self.append(Client(name, **kwargs))

    def number(self, number, **kwargs):
        return self.append(Number(number, **kwargs))

    def conference(self, name, **kwargs):
        return self.append(Conference(name, **kwargs))

    def queue(self, name, **kwargs):
        return self.append(Queue(name, **kwargs))

    # Deprecated aliases, kept for backwards compatibility.
    def addNumber(self, *args, **kwargs):
        return self.number(*args, **kwargs)

    def addConference(self, *args, **kwargs):
        return self.conference(*args, **kwargs)
class Queue(Verb):
    """Specify queue in a nested Dial element.

    :param name: friendly name for the queue
    :param url: url to a twiml document that executes after a call is dequeued
                and before the call is connected
    :param method: HTTP method for url GET/POST
    """
    GET = 'GET'
    POST = 'POST'

    def __init__(self, name, **kwargs):
        super(Queue, self).__init__(**kwargs)
        self.body = name
class Enqueue(Verb):
    """Enqueue the call into a specific queue.

    :param name: friendly name for the queue
    :param action: url to a twiml document that executes when the call
                   leaves the queue. When dequeued via a <Dial> verb,
                   this url is executed after the bridged parties disconnect
    :param method: HTTP method for action GET/POST
    :param wait_url: url to a twiml document that executes
                     while the call is on the queue
    :param wait_url_method: HTTP method for wait_url GET/POST
    """
    GET = 'GET'
    POST = 'POST'

    def __init__(self, name, **kwargs):
        super(Enqueue, self).__init__(**kwargs)
        self.body = name
class Leave(Verb):
    """Signals the call to leave its queue
    """
    GET = 'GET'
    POST = 'POST'
class Record(Verb):
    """Record audio from caller

    :param action: submit the result of the dial to this URL
    :param method: submit to 'action' url using GET or POST
    :param maxLength: maximum number of seconds to record
    :param timeout: seconds of silence before considering the recording done
    """
    GET = 'GET'
    POST = 'POST'
| mit |
Vagab0nd/SiCKRAGE | sickchill/oldbeard/databases/movie.py | 1 | 6379 | import datetime
import logging
import guessit
from slugify import slugify
from sqlalchemy import Boolean, Column, Date, DateTime, ForeignKey, Integer, Interval, JSON, SmallInteger, String
from sqlalchemy.event import listen
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
# Module-level singletons: package logger, declarative base for the ORM
# models below, and the session factory used by Result.
logger = logging.getLogger('sickchill.movie')
Base = declarative_base()
Session = sessionmaker()
class Movie(Base):
    """ORM model for a tracked movie, with helpers that read cached
    per-site indexer data (imdb/tmdb) out of the related IndexerData rows."""
    __tablename__ = "movie"
    pk = Column(Integer, primary_key=True)
    name = Column(String)
    date = Column(Date)
    year = Column(SmallInteger)
    status = Column(Integer)
    paused = Column(Boolean, default=False)
    location = Column(String)
    # Search window relative to release: start 7 days early, retry daily.
    start = Column(Interval, default=datetime.timedelta(days=-7))
    interval = Column(Interval, default=datetime.timedelta(days=1))
    added = Column(DateTime, default=datetime.datetime.now)
    updated = Column(DateTime, onupdate=datetime.datetime.now)
    completed = Column(DateTime)
    searched = Column(DateTime)
    slug = Column(String)  # kept in sync with name via the slugify listener
    language = Column(String)

    result = relationship("Result", uselist=False, backref="downloaded")
    results: list = relationship("Result", backref="movie")
    images: list = relationship("Images", backref="movie")
    indexer_data: list = relationship("IndexerData", backref="movie")

    def __init__(self, name: str, year: int):
        self.name = name
        self.year = year

    @property
    def poster(self):
        # Placeholder: always empty in this revision.
        return ''

    def __get_named_indexer_data(self, name):
        """Return the IndexerData row whose site matches *name*, or None."""
        if self.indexer_data:
            for data in self.indexer_data:
                if data.site == name:
                    return data

    @property
    def imdb_data(self):
        data = self.__get_named_indexer_data('imdb')
        if data:
            return data.data
        return dict()

    @property
    def imdb_id(self):
        data = self.__get_named_indexer_data('imdb')
        if data:
            return data.pk
        return ''

    @property
    def tmdb_id(self):
        data = self.__get_named_indexer_data('tmdb')
        if data:
            return data.pk
        return ''

    @property
    def imdb_genres(self):
        data = self.__get_named_indexer_data('imdb')
        if data:
            return data.genres
        return []

    def __get_indexer_values(self, name, keys: list):
        """Walk *keys* into the ``{name}_data`` mapping; None if missing."""
        try:
            data = getattr(self, f"{name}_data")
            for key in keys:
                data = data[key]
            return data
        except AttributeError:
            logger.debug(f'We do not have data for {name}')
        except (IndexError, KeyError):
            logger.debug(f"KeyError: {name}{''.join([f'[{k}]' for k in keys])}")

    @property
    def runtime(self):
        return self.__get_indexer_values('imdb', ['base', 'runningTimeInMinutes'])

    @property
    def imdb_votes(self):
        return self.__get_indexer_values('imdb', ['ratings', 'ratingCount'])

    @property
    def imdb_rating(self):
        return self.__get_indexer_values('imdb', ['ratings', 'rating'])

    @property
    def imdb_outline(self):
        return self.__get_indexer_values('imdb', ['plot', 'outline', 'text'])

    @property
    def imdb_summary(self):
        return self.__get_indexer_values('imdb', ['plot', 'summaries', 0, 'text'])

    @staticmethod
    def slugify(target, value, old_value, initiator):
        # SQLAlchemy attribute-event handler: refresh slug on name changes.
        if value and (not target.slug or value != old_value):
            target.slug = slugify(value)

    def search_strings(self):
        """Provider search query: '<name> <year>'."""
        return {'Movie': [f"{self.name} {self.year}"]}

    def __repr__(self):
        return f"{self.name}"
# Keep Movie.slug up to date whenever Movie.name is assigned.
listen(Movie.name, 'set', Movie.slugify, retval=False)
class Result(Base):
    """ORM row describing a single provider search result for a movie.

    NOTE: ``__init__`` returns early (leaving most attributes unset) when
    the release name is not a movie or matches none of our movies.
    """
    __tablename__ = "result"
    pk = Column(Integer, primary_key=True)
    name = Column(String)
    title = Column(String)
    url = Column(String)
    size = Column(Integer)
    year = Column(SmallInteger)
    provider = Column(String)
    seeders = Column(Integer)
    leechers = Column(Integer)
    info_hash = Column(String)
    group = Column(String)
    type = Column(String)
    guess = Column(JSON)
    found = Column(DateTime, default=datetime.datetime.now)
    updated = Column(DateTime, onupdate=datetime.datetime.now)

    movie_pk = Column(Integer, ForeignKey('movie.pk'))

    # Class-level session shared by all Result instances.
    session = Session()

    def __init__(self, result: dict, movie: Movie, provider):
        name = result['title']
        guess = guessit.guessit(name)
        # .get avoids a KeyError when guessit returns no "type" entry.
        if not (guess and guess.get("type") == "movie"):
            # Use the module logger (was module-level logging.debug, which
            # bypassed the 'sickchill.movie' logger configuration).
            logger.debug(f"This is an episode, not a movie: {name}")
            return

        if not self.session.query(Movie).filter(Movie.name.like(f"{guess['title']}%")).count():
            logger.debug("This result does not match any of our movies")
            return

        self.info_hash = result['hash']
        self.url = result['link']
        self.name = name
        self.title = guess["title"]
        self.group = guess["release_group"]
        self.seeders = result['seeders']
        self.leechers = result['leechers']
        self.size = result['size']
        self.year = guess["year"] or movie.year
        self.type = provider.provider_type
        self.provider = provider.get_id()
        self.guess = guess
        self.movie = movie

    def __repr__(self):
        return f"{self.name}"
class Images(Base):
    """ORM row for a cached artwork file belonging to a movie."""
    __tablename__ = 'images'
    url = Column(String, primary_key=True)
    path = Column(String)   # local filesystem path of the cached image
    site = Column(String)   # indexer/site the image came from
    style = Column(Integer) # image kind discriminator (poster/banner/etc.)

    movie_pk = Column(Integer, ForeignKey('movie.pk'))

    def __init__(self, site: str, movie_pk: int, url: str, path: str, style: int):
        self.url = url
        self.path = path
        self.site = site
        self.style = style
        self.movie_pk = movie_pk
class IndexerData(Base):
    """ORM row caching raw metadata fetched from one indexer site
    (e.g. 'imdb', 'tmdb') for a movie; pk is the site's own id."""
    __tablename__ = 'indexer_data'
    pk = Column(String, primary_key=True)
    site = Column(String)
    data = Column(JSON)

    movie_pk = Column(Integer, ForeignKey('movie.pk'))
    genres: list = relationship('Genres', backref='indexer_data')

    def __repr__(self):
        return f"[{self.__tablename__.replace('_', ' ').title()}] {self.site}: {self.pk} - {self.movie.name}"
class Genres(Base):
    """ORM row for a single genre label attached to an IndexerData record."""
    __tablename__ = 'genres'
    pk = Column(String, primary_key=True)
    indexer_data_pk = Column(Integer, ForeignKey('indexer_data.pk'))
| gpl-3.0 |
tchernomax/ansible | lib/ansible/modules/cloud/openstack/os_keystone_service.py | 34 | 5676 | #!/usr/bin/python
# Copyright 2016 Sam Yaple
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_keystone_service
short_description: Manage OpenStack Identity services
extends_documentation_fragment: openstack
author: "Sam Yaple (@SamYaple)"
version_added: "2.2"
description:
- Create, update, or delete OpenStack Identity service. If a service
with the supplied name already exists, it will be updated with the
new description and enabled attributes.
options:
name:
description:
- Name of the service
required: true
description:
description:
- Description of the service
enabled:
description:
- Is the service enabled
type: bool
default: 'yes'
service_type:
description:
- The type of service
required: true
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
- Ignored. Present for backwards compatibility
requirements:
- "python >= 2.7"
- "openstacksdk"
'''
EXAMPLES = '''
# Create a service for glance
- os_keystone_service:
cloud: mycloud
state: present
name: glance
service_type: image
description: OpenStack Image Service
# Delete a service
- os_keystone_service:
cloud: mycloud
state: absent
name: glance
service_type: image
'''
RETURN = '''
service:
description: Dictionary describing the service.
returned: On success when I(state) is 'present'
type: complex
contains:
id:
description: Service ID.
type: string
sample: "3292f020780b4d5baf27ff7e1d224c44"
name:
description: Service name.
type: string
sample: "glance"
service_type:
description: Service type.
type: string
sample: "image"
description:
description: Service description.
type: string
sample: "OpenStack Image Service"
enabled:
description: Service status.
type: boolean
sample: True
id:
description: The service ID.
returned: On success when I(state) is 'present'
type: string
sample: "3292f020780b4d5baf27ff7e1d224c44"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _needs_update(module, service):
if service.enabled != module.params['enabled']:
return True
if service.description is not None and \
service.description != module.params['description']:
return True
return False
def _system_state_change(module, service):
state = module.params['state']
if state == 'absent' and service:
return True
if state == 'present':
if service is None:
return True
return _needs_update(module, service)
return False
def main():
    """Ansible entry point: ensure the Keystone service matches *state*."""
    argument_spec = openstack_full_argument_spec(
        description=dict(default=None),
        enabled=dict(default=True, type='bool'),
        name=dict(required=True),
        service_type=dict(required=True),
        state=dict(default='present', choices=['absent', 'present']),
    )

    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)

    description = module.params['description']
    enabled = module.params['enabled']
    name = module.params['name']
    state = module.params['state']
    service_type = module.params['service_type']

    sdk, cloud = openstack_cloud_from_module(module)
    try:
        # A (name, type) pair must identify at most one service.
        services = cloud.search_services(name_or_id=name,
                                         filters=dict(type=service_type))

        if len(services) > 1:
            module.fail_json(msg='Service name %s and type %s are not unique' %
                             (name, service_type))
        elif len(services) == 1:
            service = services[0]
        else:
            service = None

        # In check mode, only report whether a change would happen.
        if module.check_mode:
            module.exit_json(changed=_system_state_change(module, service))

        if state == 'present':
            if service is None:
                # NOTE(review): enabled=True is hard-coded on creation,
                # ignoring the module's 'enabled' parameter (a follow-up run
                # would correct it via update) -- confirm intended.
                service = cloud.create_service(name=name, description=description,
                                               type=service_type, enabled=True)
                changed = True
            else:
                if _needs_update(module, service):
                    service = cloud.update_service(
                        service.id, name=name, type=service_type, enabled=enabled,
                        description=description)
                    changed = True
                else:
                    changed = False
            module.exit_json(changed=changed, service=service, id=service.id)

        elif state == 'absent':
            if service is None:
                changed = False
            else:
                cloud.delete_service(service.id)
                changed = True
            module.exit_json(changed=changed)

    except sdk.exceptions.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
Alberto97/android_kernel_motorola_msm8226 | tools/perf/scripts/python/net_dropmonitor.py | 1258 | 1562 | # Monitor the system for dropped packets and proudce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
global kallsyms
try:
f = open("/proc/kallsyms", "r")
linecount = 0
for line in f:
linecount = linecount+1
f.seek(0)
except:
return
j = 0
for line in f:
loc = int(line.split()[0], 16)
name = line.split()[2]
j = j +1
if ((j % 100) == 0):
print "\r" + str(j) + "/" + str(linecount),
kallsyms.append({ 'loc': loc, 'name' : name})
print "\r" + str(j) + "/" + str(linecount)
kallsyms.sort()
return
def get_sym(sloc):
    """Resolve an address to (symbol name, offset) or (None, 0)."""
    addr = int(sloc)
    # Walk the address-sorted table from the top down; the first symbol at
    # or below the address is the one containing it.
    for entry in reversed(kallsyms):
        if addr >= entry['loc']:
            return (entry['name'], addr - entry['loc'])
    return (None, 0)
def print_drop_table():
    """Print one line per drop location, resolving addresses to symbols."""
    print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
    for i in drop_log.keys():
        (sym, off) = get_sym(i)
        if sym == None:
            # Address not found in kallsyms; fall back to the raw address.
            sym = i
        print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
    """perf callback: invoked once when tracing starts."""
    print "Starting trace (Ctrl-C to dump results)"
def trace_end():
    """perf callback: invoked at exit -- resolve symbols and dump the table."""
    print "Gathering kallsyms data"
    get_kallsyms_table()
    print_drop_table()
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
                   skbaddr, location, protocol):
    """perf callback for the skb:kfree_skb tracepoint: count drops per location."""
    slocation = str(location)
    # Fixed: use dict.get() instead of a bare try/except around the
    # increment -- the bare except silently swallowed *any* error, not just
    # the expected KeyError for a first-seen location.
    drop_log[slocation] = drop_log.get(slocation, 0) + 1
| gpl-2.0 |
LittleBun/apm_planner | sik_uploader/sik_uploader.py | 13 | 9201 | #!/usr/bin/env python
#
# Serial firmware uploader for the SiK bootloader
#
import sys, argparse, binascii, serial, glob
class firmware(object):
    '''Loads a firmware file'''

    # parse a single IntelHex line and obtain the byte array and address
    def __parseline(self, line):
        # ignore lines not beginning with :
        if (line[0] != ":"):
            return;
        # parse the header off the line
        # (strip the leading ':' and the trailing 2-char checksum; the
        # checksum is NOT verified)
        hexstr = line.rstrip()[1:-2]
        binstr = binascii.unhexlify(hexstr)
        # Python 2: binstr is a str, so indexing yields 1-char strings.
        command = ord(binstr[3])

        # only type 0 records are interesting
        # NOTE(review): extended-address records (types 02/04) are ignored,
        # so images are presumably limited to a 16-bit address space --
        # confirm for the target bootloader.
        if (command == 0):
            address = (ord(binstr[1]) << 8) + ord(binstr[2])
            bytes = bytearray(binstr[4:])
            self.__insert(address, bytes)

    # insert the byte array into the ranges dictionary, merging as we go
    def __insert(self, address, bytes):
        # look for a range that immediately follows this one
        candidate = address + len(bytes)
        if (candidate in self.ranges):
            # found one, remove from ranges and merge it
            nextbytes = self.ranges.pop(candidate)
            bytes.extend(nextbytes)

        # iterate the existing ranges looking for one that precedes this
        for candidate in self.ranges.keys():
            prevlen = len(self.ranges[candidate])
            if ((candidate + prevlen) == address):
                self.ranges[candidate].extend(bytes)
                return

        # just insert it
        self.ranges[address] = bytes

    def __init__(self, path):
        # ranges maps start address -> bytearray of contiguous data
        self.ranges = dict()

        # read the file
        # XXX should have some file metadata here too ...
        f = open(path, "r")
        for line in f:
            self.__parseline(line)

    def code(self):
        """Return the {start_address: bytearray} map of firmware data."""
        return self.ranges
class uploader(object):
    '''Uploads a firmware file to the SiK bootloader.

    Protocol: each command is a single byte (constants below) terminated by
    EOC; the bootloader answers INSYNC + OK.  All traffic goes over
    ``self.port``.  (Python 2 style code: protocol bytes are 1-char strings.)
    '''

    NOP = chr(0x00)
    OK = chr(0x10)
    FAILED = chr(0x11)
    INSYNC = chr(0x12)
    EOC = chr(0x20)
    GET_SYNC = chr(0x21)
    GET_DEVICE = chr(0x22)
    CHIP_ERASE = chr(0x23)
    LOAD_ADDRESS = chr(0x24)
    PROG_FLASH = chr(0x25)   # single-byte program command (unused by the multi-byte path)
    READ_FLASH = chr(0x26)   # single-byte read command (unused by the multi-byte path)
    PROG_MULTI = chr(0x27)
    READ_MULTI = chr(0x28)
    PARAM_ERASE = chr(0x29)
    REBOOT = chr(0x30)

    PROG_MULTI_MAX = 32  # 64 causes serial hangs with some USB-serial adapters
    READ_MULTI_MAX = 255

    def __init__(self, portname, atbaudrate=57600):
        print("Connecting to %s" % portname)
        # The bootloader always talks 115200; atbaudrate is only used to
        # reach the radio's AT command mode in autosync().
        self.port = serial.Serial(portname, 115200, timeout=3)
        self.atbaudrate = atbaudrate

    def __send(self, c):
        """Write raw bytes to the serial port."""
        #print("send " + binascii.hexlify(c))
        self.port.write(str(c))

    def __recv(self):
        """Read one byte; raise RuntimeError on timeout."""
        c = self.port.read(1)
        if (len(c) < 1):
            raise RuntimeError("timeout waiting for data")
        #print("recv " + binascii.hexlify(c))
        return c

    def __getSync(self):
        """Expect the INSYNC/OK handshake that terminates every command."""
        c = self.__recv()
        if (c != self.INSYNC):
            raise RuntimeError("unexpected 0x%x instead of INSYNC (0x%x)" % (ord(c), ord(self.INSYNC)))
        c = self.__recv()
        if (c != self.OK):
            raise RuntimeError("unexpected 0x%x instead of OK (0x%x)" % (ord(c), ord(self.OK)))
        return True

    # attempt to get back into sync with the bootloader
    def __sync(self):
        # send a stream of ignored bytes longer than the longest possible conversation
        # that we might still have in progress
        self.__send(uploader.NOP * (uploader.PROG_MULTI_MAX + 2))
        self.port.flushInput()
        self.__send(uploader.GET_SYNC
                    + uploader.EOC)
        return self.__getSync()

    # send the CHIP_ERASE command and wait for the bootloader to become ready
    def __erase(self, erase_params = False):
        self.__send(uploader.CHIP_ERASE
                    + uploader.EOC)
        self.__getSync()
        if (erase_params):
            self.__send(uploader.PARAM_ERASE
                        + uploader.EOC)
            self.__getSync()

    # send a LOAD_ADDRESS command (16-bit little-endian address)
    def __set_address(self, address):
        self.__send(uploader.LOAD_ADDRESS
                    + chr(address & 0xff)
                    + chr(address >> 8)
                    + uploader.EOC)
        self.__getSync()

    # send a PROG_MULTI command to write a collection of bytes
    def __program_multi(self, data):
        self.__send(uploader.PROG_MULTI
                    + chr(len(data)))
        self.__send(data)
        self.__send(uploader.EOC)
        self.__getSync()

    # verify multiple bytes in flash
    def __verify_multi(self, data):
        self.__send(uploader.READ_MULTI
                    + chr(len(data))
                    + uploader.EOC)
        for i in data:
            if (self.__recv() != chr(i)):
                return False
        self.__getSync()
        return True

    # send the reboot command
    def __reboot(self):
        self.__send(uploader.REBOOT)

    # split a sequence into a list of size-constrained pieces
    def __split_len(self, seq, length):
        return [seq[i:i+length] for i in range(0, len(seq), length)]

    # Fixed: the class previously also defined single-byte __program(data)
    # and __verify(data) helpers (using PROG_FLASH/READ_FLASH) earlier in the
    # body; they were silently shadowed by the definitions below and were
    # therefore unreachable dead code -- removed.

    # upload code
    def __program(self, fw):
        """Program every contiguous range of the firmware image."""
        code = fw.code()
        for address in sorted(code.keys()):
            self.__set_address(address)
            groups = self.__split_len(code[address], uploader.PROG_MULTI_MAX)
            for bytes in groups:
                self.__program_multi(bytes)

    # verify code
    def __verify(self, fw):
        """Read back flash and compare against the firmware image."""
        code = fw.code()
        for address in sorted(code.keys()):
            self.__set_address(address)
            groups = self.__split_len(code[address], uploader.READ_MULTI_MAX)
            for bytes in groups:
                if (not self.__verify_multi(bytes)):
                    raise RuntimeError("Verification failed in group at 0x%x" % address)

    def autosync(self):
        '''use AT&UPDATE to put modem in update mode'''
        import fdpexpect, time
        ser = fdpexpect.fdspawn(self.port.fileno(), logfile=sys.stdout)
        if self.atbaudrate != 115200:
            self.port.setBaudrate(self.atbaudrate)
        print("Trying autosync")
        ser.send('\r\n')
        time.sleep(1.0)
        # '+++' with a guard time drops the radio into AT command mode.
        ser.send('+++')
        try:
            ser.expect('OK', timeout=1.1)
        except fdpexpect.TIMEOUT:
            # may already be in AT mode
            pass
        for i in range(5):
            ser.send('\r\nATI\r\n')
            try:
                ser.expect('SiK .* on', timeout=0.5)
                ser.send('\r\n')
                time.sleep(0.2)
                ser.send('AT&UPDATE\r\n')
                time.sleep(0.7)
                if self.atbaudrate != 115200:
                    self.port.setBaudrate(115200)
                return True
            except fdpexpect.TIMEOUT:
                continue
        if self.atbaudrate != 115200:
            self.port.setBaudrate(115200)
        return False

    # verify whether the bootloader is present and responding
    def check(self):
        for i in range(3):
            try:
                if self.__sync():
                    print("Got sync")
                    return True
                self.autosync()
            except RuntimeError:
                self.autosync()
        return False

    def identify(self):
        """Ask the bootloader for (board id, board frequency)."""
        self.__send(uploader.GET_DEVICE
                    + uploader.EOC)
        board_id = ord(self.__recv()[0])
        board_freq = ord(self.__recv()[0])
        self.__getSync()
        return board_id, board_freq

    def upload(self, fw, erase_params = False):
        """Erase, program and verify *fw*, then reboot the radio."""
        print("erase...")
        self.__erase(erase_params)
        print("program...")
        self.__program(fw)
        print("verify...")
        self.__verify(fw)
        print("done.")
        self.__reboot()
# Parse commandline arguments
parser = argparse.ArgumentParser(description="Firmware uploader for the SiK radio system.")
parser.add_argument('--port', action="store", help="port to upload to")
parser.add_argument('--resetparams', action="store_true", help="reset all parameters to defaults")
parser.add_argument("--baudrate", type=int, default=57600, help='baud rate')
parser.add_argument('firmware', action="store", help="Firmware file to be uploaded")
args = parser.parse_args()

# Load the firmware file
fw = firmware(args.firmware)

# --port may be a glob pattern; fail early if nothing matches.
ports = glob.glob(args.port)
if not ports:
    print("No matching ports for %s" % args.port)
    sys.exit(1)

# Connect to each matching device, identify it, and upload.
for port in ports:  # fixed: reuse the glob result instead of re-globbing
    print("uploading to port %s" % port)
    up = uploader(port, atbaudrate=args.baudrate)
    if not up.check():
        print("Failed to contact bootloader")
        sys.exit(1)
    board_id, board_freq = up.identify()  # fixed: 'id' shadowed the builtin
    print("board %x freq %x" % (board_id, board_freq))
    up.upload(fw, args.resetparams)
| agpl-3.0 |
proffalken/cobbler | cobbler/action_hardlink.py | 10 | 1914 | """
Hard links cobbler content together to save space.
Copyright 2009, Red Hat, Inc
Michael DeHaan <mdehaan@redhat.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import os
import utils
from cexceptions import *
import clogger
class HardLinker:
    """Hardlinks identical cobbler-managed files together to save disk space."""

    def __init__(self, config, logger=None):
        """
        Constructor: keep a logger around, creating one if none was given.
        """
        self.logger = clogger.Logger() if logger is None else logger

    def run(self):
        """
        Simply hardlinks directories that are cobbler managed.
        This is a /very/ simple command but may grow more complex
        and intelligent over time.
        """
        # FIXME: if these directories become configurable some
        # changes will be required here.
        if not os.path.exists("/usr/sbin/hardlink"):
            utils.die(self.logger,"please install 'hardlink' (/usr/sbin/hardlink) to use this feature")
        self.logger.info("now hardlinking to save space, this may take some time.")
        command = "/usr/sbin/hardlink -c -v /var/www/cobbler/ks_mirror /var/www/cobbler/repo_mirror"
        return utils.subprocess_call(self.logger, command, shell=True)
| gpl-2.0 |
open-synergy/stock-logistics-warehouse | partner_location_auto_create/models/res_partner.py | 2 | 5187 | # coding: utf-8
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2015 Savoir-faire Linux
# (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api, _
class ResPartner(models.Model):
    """Partner extension that auto-creates per-partner stock locations."""

    _inherit = 'res.partner'

    # Number of related locations, shown on the partner form button.
    locations_count = fields.Integer(
        compute='_compute_locations_count', store=False)
    location_ids = fields.One2many(
        'stock.location', 'partner_id', string='Locations')

    @api.depends('location_ids')
    def _compute_locations_count(self):
        for rec in self:
            rec.locations_count = len(rec.location_ids)

    @api.multi
    def button_locations(self):
        """Open the partner's locations (form view when there is exactly one)."""
        self.ensure_one()
        res = {
            'name': _('Locations'),
            'type': 'ir.actions.act_window',
            'res_model': 'stock.location',
            'view_type': 'form',
        }
        if len(self.location_ids) == 1:
            res['res_id'] = self.location_ids.id
            res['view_mode'] = 'form'
        else:
            res['domain'] = [('partner_id', '=', self.id)]
            res['view_mode'] = 'tree,form'
        return res

    @api.multi
    def get_main_location(self, usage):
        """Return the partner's main location with the given usage
        ('customer' or 'supplier'); empty recordset when none exists."""
        self.ensure_one()
        return self.location_ids.filtered(
            lambda l: l.usage == usage and l.main_partner_location)

    def _create_main_partner_location(self):
        # Create (or reuse) the main customer/supplier locations and point
        # the partner's property fields at them.
        self.ensure_one()
        if self.customer and self.property_stock_customer.partner_id != self:
            location_customer = (
                self.get_main_location('customer') or
                self._create_main_location('customer'))
            self.write({'property_stock_customer': location_customer})
        if self.supplier and self.property_stock_supplier.partner_id != self:
            location_supplier = (
                self.get_main_location('supplier') or
                self._create_main_location('supplier'))
            self.write({'property_stock_supplier': location_supplier})

    @api.multi
    def _create_main_location(self, usage):
        """Create the partner's main location for *usage* and return it."""
        self.ensure_one()
        # Parent under an existing main location, else the company default.
        parent = (
            self.get_main_location(usage) or
            self.company_id.get_default_location(usage)
        )
        return self.env['stock.location'].create({
            'name': self.name,
            'usage': usage,
            'partner_id': self.id,
            'company_id': self.company_id.id,
            'location_id': parent.id,
            'main_partner_location': True,
        })

    def _remove_locations(self):
        """
        Unlink all locations related to the partner
        where no stock have been moved.

        This is required to prevent unrequired locations to
        be created when a new partner is tagged as a company
        by mistake.
        """
        move_obj = self.env['stock.move']
        for location in self.mapped('location_ids'):
            # Keep any location that has ever appeared as a move source or
            # destination (including its children).
            moves = move_obj.search([
                '|',
                ('location_id', 'child_of', location.id),
                ('location_dest_id', 'child_of', location.id),
            ])
            if not moves:
                location.unlink()

    @api.model
    def create(self, vals):
        """ The first time a partner is created, a main customer
        and / or supplier location is created for this partner """
        partner = super(ResPartner, self).create(vals)
        if vals.get('is_company', False):
            partner._create_main_partner_location()
        return partner

    @api.multi
    def write(self, vals):
        # Keep the auto-created locations' names in sync with the partner.
        if vals.get('name'):
            for partner in self:
                locations = partner.location_ids.filtered(
                    lambda l: l.name == partner.name)
                locations.write({'name': vals.get('name')})
        res = super(ResPartner, self).write(vals)
        if (
            vals.get('is_company') or
            vals.get('customer') or
            vals.get('supplier')
        ):
            for partner in self.filtered('is_company'):
                partner._create_main_partner_location()
        if 'is_company' in vals and not vals['is_company']:
            # When False is written to field 'is_company'
            self._remove_locations()
        if 'active' in vals:
            # Archive/unarchive locations together with the partner.
            self.location_ids.write({'active': vals['active']})
        return res
| agpl-3.0 |
yigitguler/django | django/contrib/messages/tests/base.py | 14 | 14035 | from unittest import skipUnless
from django import http
from django.apps import apps
from django.conf import global_settings
from django.contrib.messages import constants, utils, get_level, set_level
from django.contrib.messages.api import MessageFailure
from django.contrib.messages.constants import DEFAULT_LEVELS
from django.contrib.messages.storage import default_storage, base
from django.contrib.messages.storage.base import Message
from django.core.urlresolvers import reverse
from django.test import modify_settings, override_settings
from django.utils.translation import ugettext_lazy
def skipUnlessAuthIsInstalled(func):
    """Skip the decorated test unless django.contrib.auth is installed."""
    decorator = skipUnless(
        apps.is_installed('django.contrib.auth'),
        "django.contrib.auth isn't installed")
    return decorator(func)
def add_level_messages(storage):
    """
    Adds 6 messages from different levels (including a custom one) to a storage
    instance.
    """
    # The add() order matters: tests below assert messages come back in
    # exactly this sequence.
    storage.add(constants.INFO, 'A generic info message')
    # 29 is a custom level with no default tag.
    storage.add(29, 'Some custom level')
    storage.add(constants.DEBUG, 'A debugging message', extra_tags='extra-tag')
    storage.add(constants.WARNING, 'A warning')
    storage.add(constants.ERROR, 'An error')
    storage.add(constants.SUCCESS, 'This was a triumph.')
class override_settings_tags(override_settings):
    """override_settings variant that also refreshes the cached LEVEL_TAGS."""

    def enable(self):
        super(override_settings_tags, self).enable()
        # LEVEL_TAGS is a constant defined in the
        # django.contrib.messages.storage.base module, so after changing
        # settings.MESSAGE_TAGS, we need to update that constant too.
        self.old_level_tags = base.LEVEL_TAGS
        base.LEVEL_TAGS = utils.get_level_tags()

    def disable(self):
        super(override_settings_tags, self).disable()
        # Restore the tags captured in enable().
        base.LEVEL_TAGS = self.old_level_tags
class BaseTests(object):
    """
    Storage-backend test suite mixin shared by all django.contrib.messages
    backends; concrete test classes set ``storage_class`` and also inherit
    from a Django TestCase.
    """

    storage_class = default_storage
    # Maps the level names used in the test URLs to numeric message levels.
    levels = {
        'debug': constants.DEBUG,
        'info': constants.INFO,
        'success': constants.SUCCESS,
        'warning': constants.WARNING,
        'error': constants.ERROR,
    }

    def setUp(self):
        self.settings_override = override_settings_tags(
            TEMPLATE_DIRS=(),
            TEMPLATE_CONTEXT_PROCESSORS=global_settings.TEMPLATE_CONTEXT_PROCESSORS,
            ROOT_URLCONF='django.contrib.messages.tests.urls',
            MESSAGE_TAGS='',
            MESSAGE_STORAGE='%s.%s' % (self.storage_class.__module__,
                                       self.storage_class.__name__),
            SESSION_SERIALIZER='django.contrib.sessions.serializers.JSONSerializer',
        )
        self.settings_override.enable()

    def tearDown(self):
        self.settings_override.disable()

    def get_request(self):
        return http.HttpRequest()

    def get_response(self):
        return http.HttpResponse()

    def get_storage(self, data=None):
        """
        Returns the storage backend, setting its loaded data to the ``data``
        argument.

        This method avoids the storage ``_get`` method from getting called so
        that other parts of the storage backend can be tested independent of
        the message retrieval logic.
        """
        storage = self.storage_class(self.get_request())
        storage._loaded_data = data or []
        return storage

    def test_add(self):
        storage = self.get_storage()
        self.assertFalse(storage.added_new)
        storage.add(constants.INFO, 'Test message 1')
        self.assertTrue(storage.added_new)
        storage.add(constants.INFO, 'Test message 2', extra_tags='tag')
        self.assertEqual(len(storage), 2)

    def test_add_lazy_translation(self):
        storage = self.get_storage()
        response = self.get_response()

        storage.add(constants.INFO, ugettext_lazy('lazy message'))
        storage.update(response)

        storing = self.stored_messages_count(storage, response)
        self.assertEqual(storing, 1)

    def test_no_update(self):
        storage = self.get_storage()
        response = self.get_response()
        storage.update(response)
        storing = self.stored_messages_count(storage, response)
        self.assertEqual(storing, 0)

    def test_add_update(self):
        storage = self.get_storage()
        response = self.get_response()

        storage.add(constants.INFO, 'Test message 1')
        storage.add(constants.INFO, 'Test message 1', extra_tags='tag')
        storage.update(response)

        storing = self.stored_messages_count(storage, response)
        self.assertEqual(storing, 2)

    def test_existing_add_read_update(self):
        storage = self.get_existing_storage()
        response = self.get_response()

        storage.add(constants.INFO, 'Test message 3')
        list(storage)   # Simulates a read
        storage.update(response)

        # Everything was read, so nothing needs storing.
        storing = self.stored_messages_count(storage, response)
        self.assertEqual(storing, 0)

    def test_existing_read_add_update(self):
        storage = self.get_existing_storage()
        response = self.get_response()

        list(storage)   # Simulates a read
        storage.add(constants.INFO, 'Test message 3')
        storage.update(response)

        # Only the message added after the read still needs storing.
        storing = self.stored_messages_count(storage, response)
        self.assertEqual(storing, 1)

    @override_settings(MESSAGE_LEVEL=constants.DEBUG)
    def test_full_request_response_cycle(self):
        """
        With the message middleware enabled, tests that messages are properly
        stored and then retrieved across the full request/redirect/response
        cycle.
        """
        data = {
            'messages': ['Test message %d' % x for x in range(5)],
        }
        show_url = reverse('show_message')
        for level in ('debug', 'info', 'success', 'warning', 'error'):
            add_url = reverse('add_message', args=(level,))
            response = self.client.post(add_url, data, follow=True)
            self.assertRedirects(response, show_url)
            self.assertIn('messages', response.context)
            messages = [Message(self.levels[level], msg) for msg in data['messages']]
            self.assertEqual(list(response.context['messages']), messages)
            for msg in data['messages']:
                self.assertContains(response, msg)

    @override_settings(MESSAGE_LEVEL=constants.DEBUG)
    def test_with_template_response(self):
        data = {
            'messages': ['Test message %d' % x for x in range(5)],
        }
        show_url = reverse('show_template_response')
        for level in self.levels.keys():
            add_url = reverse('add_template_response', args=(level,))
            response = self.client.post(add_url, data, follow=True)
            self.assertRedirects(response, show_url)
            self.assertIn('messages', response.context)
            for msg in data['messages']:
                self.assertContains(response, msg)

            # there shouldn't be any messages on second GET request
            response = self.client.get(show_url)
            for msg in data['messages']:
                self.assertNotContains(response, msg)

    def test_context_processor_message_levels(self):
        show_url = reverse('show_template_response')
        response = self.client.get(show_url)

        self.assertIn('DEFAULT_MESSAGE_LEVELS', response.context)
        self.assertEqual(response.context['DEFAULT_MESSAGE_LEVELS'], DEFAULT_LEVELS)

    @override_settings(MESSAGE_LEVEL=constants.DEBUG)
    def test_multiple_posts(self):
        """
        Tests that messages persist properly when multiple POSTs are made
        before a GET.
        """
        data = {
            'messages': ['Test message %d' % x for x in range(5)],
        }
        show_url = reverse('show_message')
        messages = []
        for level in ('debug', 'info', 'success', 'warning', 'error'):
            messages.extend([Message(self.levels[level], msg) for msg in data['messages']])
            add_url = reverse('add_message', args=(level,))
            self.client.post(add_url, data)
        response = self.client.get(show_url)
        self.assertIn('messages', response.context)
        self.assertEqual(list(response.context['messages']), messages)
        for msg in data['messages']:
            self.assertContains(response, msg)

    @modify_settings(
        INSTALLED_APPS={'remove': 'django.contrib.messages'},
        MIDDLEWARE_CLASSES={'remove': 'django.contrib.messages.middleware.MessageMiddleware'},
        TEMPLATE_CONTEXT_PROCESSORS={'remove': 'django.contrib.messages.context_processors.messages'},
    )
    @override_settings(MESSAGE_LEVEL=constants.DEBUG)
    def test_middleware_disabled(self):
        """
        Tests that, when the middleware is disabled, an exception is raised
        when one attempts to store a message.
        """
        data = {
            'messages': ['Test message %d' % x for x in range(5)],
        }
        # NOTE(review): the return value is discarded and reverse() has no
        # side effects -- looks like dead code left from a refactor.
        reverse('show_message')
        for level in ('debug', 'info', 'success', 'warning', 'error'):
            add_url = reverse('add_message', args=(level,))
            self.assertRaises(MessageFailure, self.client.post, add_url,
                              data, follow=True)

    @modify_settings(
        INSTALLED_APPS={'remove': 'django.contrib.messages'},
        MIDDLEWARE_CLASSES={'remove': 'django.contrib.messages.middleware.MessageMiddleware'},
        TEMPLATE_CONTEXT_PROCESSORS={'remove': 'django.contrib.messages.context_processors.messages'},
    )
    def test_middleware_disabled_fail_silently(self):
        """
        Tests that, when the middleware is disabled, an exception is not
        raised if 'fail_silently' = True
        """
        data = {
            'messages': ['Test message %d' % x for x in range(5)],
            'fail_silently': True,
        }
        show_url = reverse('show_message')
        for level in ('debug', 'info', 'success', 'warning', 'error'):
            add_url = reverse('add_message', args=(level,))
            response = self.client.post(add_url, data, follow=True)
            self.assertRedirects(response, show_url)
            self.assertNotIn('messages', response.context)

    def stored_messages_count(self, storage, response):
        """
        Returns the number of messages being stored after a
        ``storage.update()`` call.
        """
        raise NotImplementedError('This method must be set by a subclass.')

    def test_get(self):
        raise NotImplementedError('This method must be set by a subclass.')

    def get_existing_storage(self):
        # A storage pre-seeded with two messages, as if from a prior request.
        return self.get_storage([Message(constants.INFO, 'Test message 1'),
                                 Message(constants.INFO, 'Test message 2',
                                         extra_tags='tag')])

    def test_existing_read(self):
        """
        Tests that reading the existing storage doesn't cause the data to be
        lost.
        """
        storage = self.get_existing_storage()
        self.assertFalse(storage.used)
        # After iterating the storage engine directly, the used flag is set.
        data = list(storage)
        self.assertTrue(storage.used)
        # The data does not disappear because it has been iterated.
        self.assertEqual(data, list(storage))

    def test_existing_add(self):
        storage = self.get_existing_storage()
        self.assertFalse(storage.added_new)
        storage.add(constants.INFO, 'Test message 3')
        self.assertTrue(storage.added_new)

    def test_default_level(self):
        # get_level works even with no storage on the request.
        request = self.get_request()
        self.assertEqual(get_level(request), constants.INFO)

        # get_level returns the default level if it hasn't been set.
        storage = self.get_storage()
        request._messages = storage
        self.assertEqual(get_level(request), constants.INFO)

        # Only messages of sufficient level get recorded.
        add_level_messages(storage)
        self.assertEqual(len(storage), 5)

    def test_low_level(self):
        request = self.get_request()
        storage = self.storage_class(request)
        request._messages = storage

        self.assertTrue(set_level(request, 5))
        self.assertEqual(get_level(request), 5)

        # Level 5 is below every message's level, so all 6 are recorded.
        add_level_messages(storage)
        self.assertEqual(len(storage), 6)

    def test_high_level(self):
        request = self.get_request()
        storage = self.storage_class(request)
        request._messages = storage

        self.assertTrue(set_level(request, 30))
        self.assertEqual(get_level(request), 30)

        # Only ERROR (40) and SUCCESS-level-or-above messages pass the bar.
        add_level_messages(storage)
        self.assertEqual(len(storage), 2)

    @override_settings(MESSAGE_LEVEL=29)
    def test_settings_level(self):
        request = self.get_request()
        storage = self.storage_class(request)

        self.assertEqual(get_level(request), 29)

        add_level_messages(storage)
        self.assertEqual(len(storage), 3)

    def test_tags(self):
        storage = self.get_storage()
        storage.level = 0
        add_level_messages(storage)
        tags = [msg.tags for msg in storage]
        self.assertEqual(tags,
                         ['info', '', 'extra-tag debug', 'warning', 'error',
                          'success'])

    def test_level_tag(self):
        storage = self.get_storage()
        storage.level = 0
        add_level_messages(storage)
        tags = [msg.level_tag for msg in storage]
        self.assertEqual(tags,
                         ['info', '', 'debug', 'warning', 'error',
                          'success'])

    @override_settings_tags(MESSAGE_TAGS={
        constants.INFO: 'info',
        constants.DEBUG: '',
        constants.WARNING: '',
        constants.ERROR: 'bad',
        29: 'custom',
    }
    )
    def test_custom_tags(self):
        storage = self.get_storage()
        storage.level = 0
        add_level_messages(storage)
        tags = [msg.tags for msg in storage]
        self.assertEqual(tags,
                         ['info', 'custom', 'extra-tag', '', 'bad', 'success'])
| bsd-3-clause |
ojengwa/oh-mainline | vendor/packages/twisted/twisted/web/static.py | 20 | 35712 | # -*- test-case-name: twisted.web.test.test_static -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Static resources for L{twisted.web}.
"""
import os
import warnings
import urllib
import itertools
import cgi
import time
from zope.interface import implements
from twisted.web import server
from twisted.web import resource
from twisted.web import http
from twisted.web.util import redirectTo
from twisted.python import components, filepath, log
from twisted.internet import abstract, interfaces
from twisted.spread import pb
from twisted.persisted import styles
from twisted.python.util import InsensitiveDict
from twisted.python.runtime import platformType
dangerousPathError = resource.NoResource("Invalid request URL.")
def isDangerous(path):
    """Return True if *path* is a parent reference or contains a separator."""
    if path == '..':
        return True
    return '/' in path or os.sep in path
class Data(resource.Resource):
    """
    This is a static, in-memory resource.
    """

    def __init__(self, data, type):
        resource.Resource.__init__(self)
        self.data = data
        self.type = type   # content-type string served with the data


    def render_GET(self, request):
        request.setHeader("content-type", self.type)
        request.setHeader("content-length", str(len(self.data)))
        if request.method == "HEAD":
            # HEAD gets the headers (including content-length) but no body.
            return ''
        return self.data
    render_HEAD = render_GET
def addSlash(request):
    """
    Return the absolute URL for C{request} with a trailing slash appended
    to the path, preserving any query string.
    """
    path, sep, query = request.uri.partition('?')
    scheme = 'https' if request.isSecure() else 'http'
    # sep is '?' when a query string was present, '' otherwise, so
    # sep + query reproduces the original query suffix verbatim.
    return "%s://%s%s/%s" % (
        scheme,
        request.getHeader("host"),
        path,
        sep + query)
class Redirect(resource.Resource):
    """
    Resource that redirects the client to the same URL with a trailing
    slash appended to the path.
    """
    def __init__(self, request):
        resource.Resource.__init__(self)
        # The target URL is computed once, from the *creating* request.
        self.url = addSlash(request)
    def render(self, request):
        # Emit an HTTP redirect response pointing at the slashed URL.
        return redirectTo(self.url, request)
class Registry(components.Componentized, styles.Versioned):
    """
    I am a Componentized object that will be made available to internal Twisted
    file-based dynamic web content such as .rpy and .epy scripts.
    """
    def __init__(self):
        components.Componentized.__init__(self)
        # Cache mapping filesystem path -> resource built for that path.
        self._pathCache = {}

    # styles.Versioned persistence support: instances pickled before
    # version 1 lack _pathCache; upgradeToVersion1 backfills it.
    persistenceVersion = 1

    def upgradeToVersion1(self):
        self._pathCache = {}

    def cachePath(self, path, rsrc):
        # Remember the resource generated for the given path.
        self._pathCache[path] = rsrc

    def getCachedPath(self, path):
        # Return the cached resource for path, or None if never cached.
        return self._pathCache.get(path)
def loadMimeTypes(mimetype_locations=['/etc/mime.types']):
    """
    Produce a mapping of file extensions to MIME content types.

    Multiple file locations containing mime-types can be passed as a list.
    The files will be sourced in that order, overriding mime-types from the
    files sourced beforehand, but only if a new entry explicitly overrides
    the current entry.

    @param mimetype_locations: filenames in the standard mime.types format;
        locations that do not exist are silently skipped.  (The default is
        a shared list, but it is never mutated here.)
    @return: a C{dict} mapping extensions (with leading dot) to MIME type
        strings.
    """
    import mimetypes
    # Grab a *copy* of Python's built-in mimetypes dictionary.  Updating
    # mimetypes.types_map directly (as this function previously did) would
    # mutate interpreter-global state as a side effect for every other user
    # of the mimetypes module in this process.
    contentTypes = mimetypes.types_map.copy()
    # Update Python's semi-erroneous dictionary with a few of the
    # usual suspects.
    contentTypes.update(
        {
            '.conf':  'text/plain',
            '.diff':  'text/plain',
            '.exe':   'application/x-executable',
            '.flac':  'audio/x-flac',
            '.java':  'text/plain',
            '.ogg':   'application/ogg',
            '.oz':    'text/x-oz',
            '.swf':   'application/x-shockwave-flash',
            '.tgz':   'application/x-gtar',
            '.wml':   'text/vnd.wap.wml',
            '.xul':   'application/vnd.mozilla.xul+xml',
            '.py':    'text/plain',
            '.patch': 'text/plain',
        }
    )
    # Users can override these mime-types by loading them out of configuration
    # files (this defaults to ['/etc/mime.types']).
    for location in mimetype_locations:
        if os.path.exists(location):
            more = mimetypes.read_mime_types(location)
            if more is not None:
                contentTypes.update(more)
    return contentTypes
def getTypeAndEncoding(filename, types, encodings, defaultType):
    """
    Determine the content type and content encoding for C{filename}.

    If the outermost extension names a content encoding (e.g. C{.gz}),
    that encoding is reported and the next inner extension is used for the
    type lookup, so C{"foo.tar.gz"} yields the type registered for
    C{.tar} with encoding C{"gzip"}.

    @param filename: the file name to inspect.
    @param types: mapping of lowercase extensions to MIME type strings.
    @param encodings: mapping of lowercase extensions to content encodings.
    @param defaultType: type returned when no extension matches.
    @return: a C{(type, encoding)} tuple; encoding is C{None} when the
        extension does not name an encoding.
    """
    p, ext = os.path.splitext(filename)
    ext = ext.lower()
    # 'in' rather than the long-deprecated dict.has_key(); identical
    # behaviour, and works on modern Pythons too.
    if ext in encodings:
        enc = encodings[ext]
        # Strip the encoding suffix and classify the inner extension.
        ext = os.path.splitext(p)[1].lower()
    else:
        enc = None
    # Avoid shadowing the builtin 'type' with a local.
    contentType = types.get(ext, defaultType)
    return contentType, enc
class File(resource.Resource, styles.Versioned, filepath.FilePath):
    """
    File is a resource that represents a plain non-interpreted file
    (although it can look for an extension like .rpy or .cgi and hand the
    file to a processor for interpretation if you wish). Its constructor
    takes a file path.

    Alternatively, you can give a directory path to the constructor. In this
    case the resource will represent that directory, and its children will
    be files underneath that directory. This provides access to an entire
    filesystem tree with a single Resource.

    If you map the URL 'http://server/FILE' to a resource created as
    File('/tmp'), then http://server/FILE/ will return an HTML-formatted
    listing of the /tmp/ directory, and http://server/FILE/foo/bar.html will
    return the contents of /tmp/foo/bar.html .

    @cvar childNotFound: L{Resource} used to render 404 Not Found error pages.
    """

    # Class-wide extension -> content-type map, seeded from the stdlib
    # mimetypes table at import time.
    contentTypes = loadMimeTypes()

    # Extension -> Content-Encoding map for compressed variants.
    contentEncodings = {
        ".gz" : "gzip",
        ".bz2": "bzip2"
        }

    # Extension -> callable(path, registry) producing an interpreting
    # resource (e.g. for .rpy scripts); empty unless configured.
    processors = {}

    # File names tried, in order, when a directory itself is requested.
    indexNames = ["index", "index.html", "index.htm", "index.rpy"]

    # Content type of this file; None until determined in render_GET.
    type = None

    ### Versioning

    # styles.Versioned support: the upgradeToVersionN methods below migrate
    # old pickled instances one step at a time up to this version.
    persistenceVersion = 6

    def upgradeToVersion6(self):
        # The boolean 'allowExt' attribute was replaced by 'ignoredExts'.
        self.ignoredExts = []
        if self.allowExt:
            self.ignoreExt("*")
        del self.allowExt

    def upgradeToVersion5(self):
        # 'registry' became a Registry instance rather than a plain dict.
        if not isinstance(self.registry, Registry):
            self.registry = Registry()

    def upgradeToVersion4(self):
        if not hasattr(self, 'registry'):
            self.registry = {}

    def upgradeToVersion3(self):
        if not hasattr(self, 'allowExt'):
            self.allowExt = 0

    def upgradeToVersion2(self):
        self.defaultType = "text/html"

    def upgradeToVersion1(self):
        # The single 'indexName' attribute became the 'indexNames' list.
        if hasattr(self, 'indexName'):
            self.indexNames = [self.indexName]
            del self.indexName

    def __init__(self, path, defaultType="text/html", ignoredExts=(), registry=None, allowExt=0):
        """
        Create a file with the given path.

        @param path: The filename of the file from which this L{File} will
            serve data.
        @type path: C{str}

        @param defaultType: A I{major/minor}-style MIME type specifier
            indicating the I{Content-Type} with which this L{File}'s data
            will be served if a MIME type cannot be determined based on
            C{path}'s extension.
        @type defaultType: C{str}

        @param ignoredExts: A sequence giving the extensions of paths in the
            filesystem which will be ignored for the purposes of child
            lookup.  For example, if C{ignoredExts} is C{(".bar",)} and
            C{path} is a directory containing a file named C{"foo.bar"}, a
            request for the C{"foo"} child of this resource will succeed
            with a L{File} pointing to C{"foo.bar"}.

        @param registry: The registry object being used to handle this
            request.  If C{None}, one will be created.
        @type registry: L{Registry}

        @param allowExt: Ignored parameter, only present for backwards
            compatibility.  Do not pass a value for this parameter.
        """
        resource.Resource.__init__(self)
        filepath.FilePath.__init__(self, path)
        self.defaultType = defaultType
        # Backwards compatibility: callers used to pass a boolean here.
        if ignoredExts in (0, 1) or allowExt:
            warnings.warn("ignoredExts should receive a list, not a boolean")
            if ignoredExts or allowExt:
                self.ignoredExts = ['*']
            else:
                self.ignoredExts = []
        else:
            self.ignoredExts = list(ignoredExts)
        self.registry = registry or Registry()

    def ignoreExt(self, ext):
        """Ignore the given extension.

        Serve file.ext if file is requested
        """
        self.ignoredExts.append(ext)

    # Resource rendered for children that cannot be found; overridable
    # per-instance (propagated by createSimilarFile).
    childNotFound = resource.NoResource("File not found.")

    def directoryListing(self):
        # Build a listing page for this directory using the current
        # type/encoding configuration.
        return DirectoryLister(self.path,
                               self.listNames(),
                               self.contentTypes,
                               self.contentEncodings,
                               self.defaultType)

    def getChild(self, path, request):
        """
        If this L{File}"s path refers to a directory, return a L{File}
        referring to the file named C{path} in that directory.

        If C{path} is the empty string, return a L{DirectoryLister} instead.
        """
        # Refresh cached stat info; a vanished file must not 500 here.
        self.restat(reraise=False)

        if not self.isdir():
            return self.childNotFound

        if path:
            try:
                fpath = self.child(path)
            except filepath.InsecurePath:
                # Traversal attempt ('..' etc.) -> 404, not an error.
                return self.childNotFound
        else:
            # Empty segment: look for an index file in this directory.
            fpath = self.childSearchPreauth(*self.indexNames)
            if fpath is None:
                return self.directoryListing()

        if not fpath.exists():
            # Perhaps the request omitted an ignored extension
            # (e.g. 'foo' for 'foo.bar' when '.bar' is ignored).
            fpath = fpath.siblingExtensionSearch(*self.ignoredExts)
            if fpath is None:
                return self.childNotFound

        if platformType == "win32":
            # don't want .RPY to be different than .rpy, since that would allow
            # source disclosure.
            processor = InsensitiveDict(self.processors).get(fpath.splitext()[1])
        else:
            processor = self.processors.get(fpath.splitext()[1])
        if processor:
            # Hand the file off to its registered interpreter.
            return resource.IResource(processor(fpath.path, self.registry))
        return self.createSimilarFile(fpath.path)

    # methods to allow subclasses to e.g. decrypt files on the fly:
    def openForReading(self):
        """Open a file and return it."""
        return self.open()

    def getFileSize(self):
        """Return file size."""
        return self.getsize()

    def _parseRangeHeader(self, range):
        """
        Parse the value of a Range header into (start, stop) pairs.

        In a given pair, either of start or stop can be None, signifying that
        no value was provided, but not both.

        @return: A list C{[(start, stop)]} of pairs of length at least one.

        @raise ValueError: if the header is syntactically invalid or if the
            Bytes-Unit is anything other than 'bytes'.
        """
        try:
            kind, value = range.split('=', 1)
        except ValueError:
            raise ValueError("Missing '=' separator")
        kind = kind.strip()
        if kind != 'bytes':
            raise ValueError("Unsupported Bytes-Unit: %r" % (kind,))
        # Split on commas, strip whitespace, and drop empty elements
        # (tolerates e.g. 'bytes=0-1,,2-3').
        unparsedRanges = filter(None, map(str.strip, value.split(',')))
        parsedRanges = []
        for byteRange in unparsedRanges:
            try:
                start, end = byteRange.split('-', 1)
            except ValueError:
                raise ValueError("Invalid Byte-Range: %r" % (byteRange,))
            if start:
                try:
                    start = int(start)
                except ValueError:
                    raise ValueError("Invalid Byte-Range: %r" % (byteRange,))
            else:
                start = None
            if end:
                try:
                    end = int(end)
                except ValueError:
                    raise ValueError("Invalid Byte-Range: %r" % (byteRange,))
            else:
                end = None
            if start is not None:
                if end is not None and start > end:
                    # Start must be less than or equal to end or it is invalid.
                    raise ValueError("Invalid Byte-Range: %r" % (byteRange,))
            elif end is None:
                # One or both of start and end must be specified.  Omitting
                # both is invalid.
                raise ValueError("Invalid Byte-Range: %r" % (byteRange,))
            parsedRanges.append((start, end))
        return parsedRanges

    def _rangeToOffsetAndSize(self, start, end):
        """
        Convert a start and end from a Range header to an offset and size.

        This method checks that the resulting range overlaps with the resource
        being served (and so has the value of C{getFileSize()} as an indirect
        input).

        Either but not both of start or end can be C{None}:

         - Omitted start means that the end value is actually a start value
           relative to the end of the resource.

         - Omitted end means the end of the resource should be the end of
           the range.

        End is interpreted as inclusive, as per RFC 2616.

        If this range doesn't overlap with any of this resource, C{(0, 0)} is
        returned, which is not otherwise a valid return value.

        @param start: The start value from the header, or C{None} if one was
            not present.
        @param end: The end value from the header, or C{None} if one was not
            present.
        @return: C{(offset, size)} where offset is how far into this resource
            this resource the range begins and size is how long the range is,
            or C{(0, 0)} if the range does not overlap this resource.
        """
        size = self.getFileSize()
        if start is None:
            # Suffix range: 'end' counts bytes back from the end.
            start = size - end
            end = size
        elif end is None:
            # Open-ended range: serve through to the end of the resource.
            end = size
        elif end < size:
            # Convert the inclusive end position to an exclusive bound.
            end += 1
        elif end > size:
            # Clamp ranges that run past the end of the resource.
            end = size
        if start >= size:
            # No overlap with the resource at all.
            start = end = 0
        return start, (end - start)

    def _contentRange(self, offset, size):
        """
        Return a string suitable for the value of a Content-Range header for a
        range with the given offset and size.

        The offset and size are not sanity checked in any way.

        @param offset: How far into this resource the range begins.
        @param size: How long the range is.
        @return: The value as appropriate for the value of a Content-Range
            header.
        """
        return 'bytes %d-%d/%d' % (
            offset, offset + size - 1, self.getFileSize())

    def _doSingleRangeRequest(self, request, (start, end)):
        """
        Set up the response for Range headers that specify a single range.

        This method checks if the request is satisfiable and sets the response
        code and Content-Range header appropriately.  The return value
        indicates which part of the resource to return.

        @param request: The Request object.
        @param start: The start of the byte range as specified by the header.
        @param end: The end of the byte range as specified by the header.  At
            most one of C{start} and C{end} may be C{None}.
        @return: A 2-tuple of the offset and size of the range to return.
            offset == size == 0 indicates that the request is not satisfiable.
        """
        offset, size  = self._rangeToOffsetAndSize(start, end)
        if offset == size == 0:
            # This range doesn't overlap with any of this resource, so the
            # request is unsatisfiable.
            request.setResponseCode(http.REQUESTED_RANGE_NOT_SATISFIABLE)
            request.setHeader(
                'content-range', 'bytes */%d' % (self.getFileSize(),))
        else:
            request.setResponseCode(http.PARTIAL_CONTENT)
            request.setHeader(
                'content-range', self._contentRange(offset, size))
        return offset, size

    def _doMultipleRangeRequest(self, request, byteRanges):
        """
        Set up the response for Range headers that specify more than one
        range.

        This method checks if the request is satisfiable and sets the response
        code and Content-Type and Content-Length headers appropriately.  The
        return value, which is a little complicated, indicates which parts of
        the resource to return and the boundaries that should separate the
        parts.

        In detail, the return value C{rangeInfo} is a list of 3-tuples
        C{(partSeparator, partOffset, partSize)}.  The response to this
        request should be, for each element of C{rangeInfo},
        C{partSeparator} followed by C{partSize} bytes of the resource
        starting at C{partOffset}.  Each C{partSeparator} includes the
        MIME-style boundary and the part-specific Content-type and
        Content-range headers.  It is convenient to return the separator as a
        concrete string from this method, because this method needs to compute
        the number of bytes that will make up the response to be able to set
        the Content-Length header of the response accurately.

        @param request: The Request object.
        @param byteRanges: A list of C{(start, end)} values as specified by
            the header.  For each range, at most one of C{start} and C{end}
            may be C{None}.
        @return: See above.
        """
        matchingRangeFound = False
        rangeInfo = []
        contentLength = 0
        # A (hopefully) unique MIME boundary derived from time and pid.
        boundary = "%x%x" % (int(time.time()*1000000), os.getpid())
        if self.type:
            contentType = self.type
        else:
            contentType = 'bytes' # It's what Apache does...
        for start, end in byteRanges:
            partOffset, partSize = self._rangeToOffsetAndSize(start, end)
            if partOffset == partSize == 0:
                # Unsatisfiable ranges are simply skipped.
                continue
            contentLength += partSize
            matchingRangeFound = True
            partContentRange = self._contentRange(partOffset, partSize)
            partSeparator = (
                "\r\n"
                "--%s\r\n"
                "Content-type: %s\r\n"
                "Content-range: %s\r\n"
                "\r\n") % (boundary, contentType, partContentRange)
            contentLength += len(partSeparator)
            rangeInfo.append((partSeparator, partOffset, partSize))
        if not matchingRangeFound:
            request.setResponseCode(http.REQUESTED_RANGE_NOT_SATISFIABLE)
            request.setHeader(
                'content-length', '0')
            request.setHeader(
                'content-range', 'bytes */%d' % (self.getFileSize(),))
            return [], ''
        finalBoundary = "\r\n--" + boundary + "--\r\n"
        # Terminating boundary is emitted as a zero-length "part".
        rangeInfo.append((finalBoundary, 0, 0))
        request.setResponseCode(http.PARTIAL_CONTENT)
        request.setHeader(
            'content-type', 'multipart/byteranges; boundary="%s"' % (boundary,))
        request.setHeader(
            'content-length', contentLength + len(finalBoundary))
        return rangeInfo

    def _setContentHeaders(self, request, size=None):
        """
        Set the Content-length and Content-type headers for this request.

        This method is not appropriate for requests for multiple byte ranges;
        L{_doMultipleRangeRequest} will set these headers in that case.

        @param request: The L{Request} object.
        @param size: The size of the response.  If not specified, default to
            C{self.getFileSize()}.
        """
        if size is None:
            size = self.getFileSize()
        request.setHeader('content-length', str(size))
        if self.type:
            request.setHeader('content-type', self.type)
        if self.encoding:
            request.setHeader('content-encoding', self.encoding)

    def makeProducer(self, request, fileForReading):
        """
        Make a L{StaticProducer} that will produce the body of this response.

        This method will also set the response code and Content-* headers.

        @param request: The L{Request} object.
        @param fileForReading: The file object containing the resource.
        @return: A L{StaticProducer}.  Calling C{.start()} on this will begin
            producing the response.
        """
        byteRange = request.getHeader('range')
        if byteRange is None:
            # No Range header: serve the whole file.
            self._setContentHeaders(request)
            request.setResponseCode(http.OK)
            return NoRangeStaticProducer(request, fileForReading)
        try:
            parsedRanges = self._parseRangeHeader(byteRange)
        except ValueError:
            # A malformed Range header is ignored, not rejected (RFC 2616
            # allows treating the request as if the header were absent).
            log.msg("Ignoring malformed Range header %r" % (byteRange,))
            self._setContentHeaders(request)
            request.setResponseCode(http.OK)
            return NoRangeStaticProducer(request, fileForReading)

        if len(parsedRanges) == 1:
            offset, size = self._doSingleRangeRequest(
                request, parsedRanges[0])
            self._setContentHeaders(request, size)
            return SingleRangeStaticProducer(
                request, fileForReading, offset, size)
        else:
            rangeInfo = self._doMultipleRangeRequest(request, parsedRanges)
            return MultipleRangeStaticProducer(
                request, fileForReading, rangeInfo)

    def render_GET(self, request):
        """
        Begin sending the contents of this L{File} (or a subset of the
        contents, based on the 'range' header) to the given request.
        """
        # Refresh stat info; the file may have changed since construction.
        self.restat(False)

        if self.type is None:
            # Content type/encoding are determined lazily from the name.
            self.type, self.encoding = getTypeAndEncoding(self.basename(),
                                                          self.contentTypes,
                                                          self.contentEncodings,
                                                          self.defaultType)

        if not self.exists():
            return self.childNotFound.render(request)

        if self.isdir():
            # Directories redirect to the trailing-slash form of the URL.
            return self.redirect(request)

        request.setHeader('accept-ranges', 'bytes')

        try:
            fileForReading = self.openForReading()
        except IOError, e:
            import errno
            if e[0] == errno.EACCES:
                # Unreadable file -> 403 rather than a traceback.
                return resource.ForbiddenResource().render(request)
            else:
                raise

        # Conditional GET: an If-Modified-Since hit short-circuits to 304.
        if request.setLastModified(self.getmtime()) is http.CACHED:
            return ''

        producer = self.makeProducer(request, fileForReading)

        if request.method == 'HEAD':
            return ''

        producer.start()
        # and make sure the connection doesn't get closed
        return server.NOT_DONE_YET
    render_HEAD = render_GET

    def redirect(self, request):
        # Redirect to this resource's URL with a trailing slash appended.
        return redirectTo(addSlash(request), request)

    def listNames(self):
        # Sorted names of this directory's entries; empty for non-dirs.
        if not self.isdir():
            return []
        directory = self.listdir()
        directory.sort()
        return directory

    def listEntities(self):
        # One similar File resource per directory entry.
        return map(lambda fileName, self=self: self.createSimilarFile(os.path.join(self.path, fileName)), self.listNames())

    def createSimilarFile(self, path):
        # Clone this resource for a child path, propagating per-instance
        # configuration that the constructor does not accept.
        f = self.__class__(path, self.defaultType, self.ignoredExts, self.registry)
        # refactoring by steps, here - constructor should almost certainly take these
        f.processors = self.processors
        f.indexNames = self.indexNames[:]
        f.childNotFound = self.childNotFound
        return f
class StaticProducer(object):
    """
    Superclass for classes that implement the business of producing.

    @ivar request: The L{IRequest} to write the contents of the file to.
    @ivar fileObject: The file the contents of which to write to the request.
    """

    implements(interfaces.IPullProducer)

    # Write to the request in chunks of the reactor's preferred size.
    bufferSize = abstract.FileDescriptor.bufferSize

    def __init__(self, request, fileObject):
        """
        Initialize the instance.
        """
        self.request = request
        self.fileObject = fileObject

    def start(self):
        # Subclasses register themselves as a producer with the request.
        raise NotImplementedError(self.start)

    def resumeProducing(self):
        raise NotImplementedError(self.resumeProducing)

    def stopProducing(self):
        """
        Stop producing data.

        L{IPullProducer.stopProducing} is called when our consumer has died,
        and subclasses also call this method when they are done producing
        data.
        """
        self.fileObject.close()
        # Dropping the request reference turns resumeProducing into a no-op.
        self.request = None
class NoRangeStaticProducer(StaticProducer):
    """
    A L{StaticProducer} that writes the entire file to the request.
    """

    def start(self):
        self.request.registerProducer(self, False)

    def resumeProducing(self):
        if not self.request:
            # The consumer went away; nothing left to do.
            return
        chunk = self.fileObject.read(self.bufferSize)
        if not chunk:
            # End of file: finish the response and release resources.
            self.request.unregisterProducer()
            self.request.finish()
            self.stopProducing()
            return
        # This .write may spin the reactor, calling .doWrite and then
        # .resumeProducing again, so be prepared for a re-entrant call.
        self.request.write(chunk)
class SingleRangeStaticProducer(StaticProducer):
    """
    A L{StaticProducer} that writes a single chunk of a file to the request.
    """

    def __init__(self, request, fileObject, offset, size):
        """
        Initialize the instance.

        @param request: See L{StaticProducer}.
        @param fileObject: See L{StaticProducer}.
        @param offset: The offset into the file of the chunk to be written.
        @param size: The size of the chunk to write.
        """
        StaticProducer.__init__(self, request, fileObject)
        self.offset = offset
        self.size = size

    def start(self):
        # Seek to the start of the requested range before producing.
        self.fileObject.seek(self.offset)
        self.bytesWritten = 0
        self.request.registerProducer(self, 0)

    def resumeProducing(self):
        if not self.request:
            return
        # Never read past the end of the requested range.
        data = self.fileObject.read(
            min(self.bufferSize, self.size - self.bytesWritten))
        if data:
            self.bytesWritten += len(data)
            # this .write will spin the reactor, calling .doWrite and then
            # .resumeProducing again, so be prepared for a re-entrant call
            self.request.write(data)
        # Re-check self.request: the re-entrant call above may already have
        # completed the response.
        if self.request and self.bytesWritten == self.size:
            self.request.unregisterProducer()
            self.request.finish()
            self.stopProducing()
class MultipleRangeStaticProducer(StaticProducer):
    """
    A L{StaticProducer} that writes several chunks of a file to the request.
    """

    def __init__(self, request, fileObject, rangeInfo):
        """
        Initialize the instance.

        @param request: See L{StaticProducer}.
        @param fileObject: See L{StaticProducer}.
        @param rangeInfo: A list of tuples C{[(boundary, offset, size)]}
            where:
             - C{boundary} will be written to the request first.
             - C{offset} the offset into the file of chunk to write.
             - C{size} the size of the chunk to write.
        """
        StaticProducer.__init__(self, request, fileObject)
        self.rangeInfo = rangeInfo

    def start(self):
        self.rangeIter = iter(self.rangeInfo)
        # Prime the first part: sets partBoundary and seeks the file.
        self._nextRange()
        self.request.registerProducer(self, 0)

    def _nextRange(self):
        # Advance to the next (boundary, offset, size) triple; raises
        # StopIteration once every part has been consumed.
        self.partBoundary, partOffset, self._partSize = self.rangeIter.next()
        self._partBytesWritten = 0
        self.fileObject.seek(partOffset)

    def resumeProducing(self):
        if not self.request:
            return
        data = []
        dataLength = 0
        done = False
        # Accumulate up to bufferSize bytes, possibly spanning several
        # parts, then perform a single write at the end.
        while dataLength < self.bufferSize:
            if self.partBoundary:
                # A new part begins: emit its MIME boundary and headers once.
                dataLength += len(self.partBoundary)
                data.append(self.partBoundary)
                self.partBoundary = None
            p = self.fileObject.read(
                min(self.bufferSize - dataLength,
                    self._partSize - self._partBytesWritten))
            self._partBytesWritten += len(p)
            dataLength += len(p)
            data.append(p)
            if self.request and self._partBytesWritten == self._partSize:
                try:
                    self._nextRange()
                except StopIteration:
                    done = True
                    break
        self.request.write(''.join(data))
        if done:
            self.request.unregisterProducer()
            self.request.finish()
            self.request = None
class FileTransfer(pb.Viewable):
    """
    A class to represent the transfer of a file over the network.

    Deprecated since Twisted 9.0 (see the runtime warning in __init__);
    kept only for backwards compatibility.
    """
    request = None

    def __init__(self, file, size, request):
        # stacklevel=2 attributes the deprecation warning to the caller.
        warnings.warn(
            "FileTransfer is deprecated since Twisted 9.0. "
            "Use a subclass of StaticProducer instead.",
            DeprecationWarning, stacklevel=2)
        self.file = file
        self.size = size
        self.request = request
        # Byte accounting starts from the file's current position.
        self.written = self.file.tell()
        request.registerProducer(self, 0)

    def resumeProducing(self):
        if not self.request:
            return
        data = self.file.read(min(abstract.FileDescriptor.bufferSize, self.size - self.written))
        if data:
            self.written += len(data)
            # this .write will spin the reactor, calling .doWrite and then
            # .resumeProducing again, so be prepared for a re-entrant call
            self.request.write(data)
        if self.request and self.file.tell() == self.size:
            self.request.unregisterProducer()
            self.request.finish()
            self.request = None

    def pauseProducing(self):
        pass

    def stopProducing(self):
        # Consumer died: release the file and forget the request.
        self.file.close()
        self.request = None

    # Remotely relay producer interface.

    def view_resumeProducing(self, issuer):
        self.resumeProducing()

    def view_pauseProducing(self, issuer):
        self.pauseProducing()

    def view_stopProducing(self, issuer):
        self.stopProducing()
class ASISProcessor(resource.Resource):
    """
    Serve files exactly as responses without generating a status-line or any
    headers.  Inspired by Apache's mod_asis.
    """

    def __init__(self, path, registry=None):
        resource.Resource.__init__(self)
        self.path = path
        self.registry = registry or Registry()

    def render(self, request):
        # Pretend the response has already begun, so the File resource
        # emits no status line or headers of its own: the file's bytes go
        # out verbatim, embedded headers and all.
        request.startedWriting = 1
        return File(self.path, registry=self.registry).render(request)
def formatFileSize(size):
    """
    Format the given file size in bytes as a short human readable string
    (B, K, M or G units, truncated to an integer).
    """
    if size < 1024:
        return '%iB' % size
    if size < 1024 ** 2:
        return '%iK' % (size // 1024)
    if size < 1024 ** 3:
        return '%iM' % (size // 1024 ** 2)
    return '%iG' % (size // 1024 ** 3)
class DirectoryLister(resource.Resource):
    """
    Print the content of a directory.

    @ivar template: page template used to render the content of the directory.
        It must contain the format keys B{header} and B{tableContent}.
    @type template: C{str}

    @ivar linePattern: template used to render one line in the listing table.
        It must contain the format keys B{class}, B{href}, B{text}, B{size},
        B{type} and B{encoding}.
    @type linePattern: C{str}

    @ivar contentEncodings: a mapping of extensions to encoding types.
    @type contentEncodings: C{dict}

    @ivar defaultType: default type used when no mimetype is detected.
    @type defaultType: C{str}

    @ivar dirs: filtered content of C{path}, if the whole content should not be
        displayed (default to C{None}, which means the actual content of
        C{path} is printed).
    @type dirs: C{NoneType} or C{list}

    @ivar path: directory which content should be listed.
    @type path: C{str}
    """

    # %-style template for the whole page; note %% escapes for literal '%'.
    template = """<html>
<head>
<title>%(header)s</title>
<style>
.even-dir { background-color: #efe0ef }
.even { background-color: #eee }
.odd-dir {background-color: #f0d0ef }
.odd { background-color: #dedede }
.icon { text-align: center }
.listing {
    margin-left: auto;
    margin-right: auto;
    width: 50%%;
    padding: 0.1em;
    }

body { border: 0; padding: 0; margin: 0; background-color: #efefef; }
h1 {padding: 0.1em; background-color: #777; color: white; border-bottom: thin white dashed;}

</style>
</head>

<body>
<h1>%(header)s</h1>

<table>
    <thead>
        <tr>
            <th>Filename</th>
            <th>Size</th>
            <th>Content type</th>
            <th>Content encoding</th>
        </tr>
    </thead>
    <tbody>
%(tableContent)s
    </tbody>
</table>

</body>
</html>
"""

    # %-style template for one table row.
    linePattern = """<tr class="%(class)s">
    <td><a href="%(href)s">%(text)s</a></td>
    <td>%(size)s</td>
    <td>%(type)s</td>
    <td>%(encoding)s</td>
</tr>
"""

    def __init__(self, pathname, dirs=None,
                 contentTypes=File.contentTypes,
                 contentEncodings=File.contentEncodings,
                 defaultType='text/html'):
        resource.Resource.__init__(self)
        self.contentTypes = contentTypes
        self.contentEncodings = contentEncodings
        self.defaultType = defaultType
        # dirs allows usage of the File to specify what gets listed
        self.dirs = dirs
        self.path = pathname

    def _getFilesAndDirectories(self, directory):
        """
        Helper returning files and directories in given directory listing, with
        attributes to be used to build a table content with
        C{self.linePattern}.

        @return: tuple of (directories, files)
        @rtype: C{tuple} of C{list}
        """
        files = []
        dirs = []
        for path in directory:
            # URL-quote for the href; HTML-escape for the link text.
            url = urllib.quote(path, "/")
            escapedPath = cgi.escape(path)
            if os.path.isdir(os.path.join(self.path, path)):
                url = url + '/'
                dirs.append({'text': escapedPath + "/", 'href': url,
                             'size': '', 'type': '[Directory]',
                             'encoding': ''})
            else:
                mimetype, encoding = getTypeAndEncoding(path, self.contentTypes,
                                                        self.contentEncodings,
                                                        self.defaultType)
                try:
                    size = os.stat(os.path.join(self.path, path)).st_size
                except OSError:
                    # Entry vanished between listing and stat: skip it.
                    continue
                files.append({
                    'text': escapedPath, "href": url,
                    'type': '[%s]' % mimetype,
                    'encoding': (encoding and '[%s]' % encoding or ''),
                    'size': formatFileSize(size)})
        return dirs, files

    def _buildTableContent(self, elements):
        """
        Build a table content using C{self.linePattern} and giving elements odd
        and even classes.
        """
        tableContent = []
        # Alternate 'odd'/'even' CSS classes across rows.
        rowClasses = itertools.cycle(['odd', 'even'])
        for element, rowClass in zip(elements, rowClasses):
            element["class"] = rowClass
            tableContent.append(self.linePattern % element)
        return tableContent

    def render(self, request):
        """
        Render a listing of the content of C{self.path}.
        """
        request.setHeader("content-type", "text/html; charset=utf-8")
        if self.dirs is None:
            directory = os.listdir(self.path)
            directory.sort()
        else:
            directory = self.dirs

        dirs, files = self._getFilesAndDirectories(directory)

        tableContent = "".join(self._buildTableContent(dirs + files))

        # Show the decoded request URI (HTML-escaped) as the page header.
        header = "Directory listing for %s" % (
            cgi.escape(urllib.unquote(request.uri)),)

        return self.template % {"header": header, "tableContent": tableContent}

    def __repr__(self):
        return '<DirectoryLister of %r>' % self.path

    __str__ = __repr__
| agpl-3.0 |
anbasile/flask_sample | flask/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/universaldetector.py | 1776 | 6840 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
import codecs
from .latin1prober import Latin1Prober # windows-1252
from .mbcsgroupprober import MBCSGroupProber # multi-byte character sets
from .sbcsgroupprober import SBCSGroupProber # single-byte character sets
from .escprober import EscCharSetProber # ISO-2122, etc.
import re
# A prober must report at least this much confidence in close() for its
# answer to be used; otherwise no encoding is reported.
MINIMUM_THRESHOLD = 0.20

# Input-state values for UniversalDetector._mInputState:
ePureAscii = 0   # only 7-bit ASCII bytes seen so far
eEscAscii = 1    # ASCII plus ESC or '~{' sequences (see _escDetector)
eHighbyte = 2    # at least one byte with the high bit set seen
class UniversalDetector:
    """
    Incremental character-encoding detector.

    Feed byte chunks with C{feed()}; when the stream ends (or C{self.done}
    becomes True) call C{close()}.  The verdict is C{self.result}, a dict
    with 'encoding' and 'confidence' keys.
    """
    def __init__(self):
        # Matches any byte with the high bit set (i.e. non-ASCII input).
        self._highBitDetector = re.compile(b'[\x80-\xFF]')
        # Matches ESC (ISO-2022 family) or '~{' escape introducers.
        self._escDetector = re.compile(b'(\033|~{)')
        self._mEscCharSetProber = None
        self._mCharSetProbers = []
        self.reset()

    def reset(self):
        # Restore the detector to its initial state for a new document;
        # existing probers are reset rather than recreated.
        self.result = {'encoding': None, 'confidence': 0.0}
        self.done = False
        self._mStart = True
        self._mGotData = False
        self._mInputState = ePureAscii
        self._mLastChar = b''
        if self._mEscCharSetProber:
            self._mEscCharSetProber.reset()
        for prober in self._mCharSetProbers:
            prober.reset()

    def feed(self, aBuf):
        # Process one chunk of the byte stream; no-op once a verdict has
        # been reached or for empty input.
        if self.done:
            return

        aLen = len(aBuf)
        if not aLen:
            return

        if not self._mGotData:
            # If the data starts with BOM, we know it is UTF
            # NOTE: 4-byte BOMs must be tested before the 2-byte ones,
            # because BOM_UTF32_LE begins with the same bytes as BOM_LE.
            if aBuf[:3] == codecs.BOM_UTF8:
                # EF BB BF  UTF-8 with BOM
                self.result = {'encoding': "UTF-8-SIG", 'confidence': 1.0}
            elif aBuf[:4] == codecs.BOM_UTF32_LE:
                # FF FE 00 00  UTF-32, little-endian BOM
                self.result = {'encoding': "UTF-32LE", 'confidence': 1.0}
            elif aBuf[:4] == codecs.BOM_UTF32_BE:
                # 00 00 FE FF  UTF-32, big-endian BOM
                self.result = {'encoding': "UTF-32BE", 'confidence': 1.0}
            elif aBuf[:4] == b'\xFE\xFF\x00\x00':
                # FE FF 00 00  UCS-4, unusual octet order BOM (3412)
                self.result = {
                    'encoding': "X-ISO-10646-UCS-4-3412",
                    'confidence': 1.0
                }
            elif aBuf[:4] == b'\x00\x00\xFF\xFE':
                # 00 00 FF FE  UCS-4, unusual octet order BOM (2143)
                self.result = {
                    'encoding': "X-ISO-10646-UCS-4-2143",
                    'confidence': 1.0
                }
            elif aBuf[:2] == codecs.BOM_LE:
                # FF FE  UTF-16, little endian BOM
                self.result = {'encoding': "UTF-16LE", 'confidence': 1.0}
            elif aBuf[:2] == codecs.BOM_BE:
                # FE FF  UTF-16, big endian BOM
                self.result = {'encoding': "UTF-16BE", 'confidence': 1.0}

            self._mGotData = True
            if self.result['encoding'] and (self.result['confidence'] > 0.0):
                # A BOM is conclusive: stop immediately.
                self.done = True
                return

        if self._mInputState == ePureAscii:
            if self._highBitDetector.search(aBuf):
                # Saw a non-ASCII byte: switch to high-byte detection.
                self._mInputState = eHighbyte
            elif ((self._mInputState == ePureAscii) and
                    self._escDetector.search(self._mLastChar + aBuf)):
                # Escape sequence seen (possibly split across chunks, hence
                # prepending the previous chunk's last byte).
                self._mInputState = eEscAscii

        self._mLastChar = aBuf[-1:]

        if self._mInputState == eEscAscii:
            # Lazily create the escape-sequence charset prober.
            if not self._mEscCharSetProber:
                self._mEscCharSetProber = EscCharSetProber()
            if self._mEscCharSetProber.feed(aBuf) == constants.eFoundIt:
                self.result = {'encoding': self._mEscCharSetProber.get_charset_name(),
                               'confidence': self._mEscCharSetProber.get_confidence()}
                self.done = True
        elif self._mInputState == eHighbyte:
            # Lazily create the multi-byte / single-byte / Latin-1 probers;
            # the first one to report eFoundIt decides the result.
            if not self._mCharSetProbers:
                self._mCharSetProbers = [MBCSGroupProber(), SBCSGroupProber(),
                                         Latin1Prober()]
            for prober in self._mCharSetProbers:
                if prober.feed(aBuf) == constants.eFoundIt:
                    self.result = {'encoding': prober.get_charset_name(),
                                   'confidence': prober.get_confidence()}
                    self.done = True
                    break

    def close(self):
        # Finish detection.  Returns self.result for the ASCII and
        # high-byte paths; note it returns None when no data was received
        # or when the input stayed in the escape-sequence state without a
        # conclusive prober hit (the verdict is still in self.result).
        if self.done:
            return
        if not self._mGotData:
            if constants._debug:
                sys.stderr.write('no data received!\n')
            return
        self.done = True

        if self._mInputState == ePureAscii:
            # Only 7-bit bytes were ever seen.
            self.result = {'encoding': 'ascii', 'confidence': 1.0}
            return self.result

        if self._mInputState == eHighbyte:
            # Pick the prober with the highest confidence, provided it
            # clears MINIMUM_THRESHOLD.
            proberConfidence = None
            maxProberConfidence = 0.0
            maxProber = None
            for prober in self._mCharSetProbers:
                if not prober:
                    continue
                proberConfidence = prober.get_confidence()
                if proberConfidence > maxProberConfidence:
                    maxProberConfidence = proberConfidence
                    maxProber = prober
            if maxProber and (maxProberConfidence > MINIMUM_THRESHOLD):
                self.result = {'encoding': maxProber.get_charset_name(),
                               'confidence': maxProber.get_confidence()}
                return self.result

        if constants._debug:
            sys.stderr.write('no probers hit minimum threshhold\n')
            for prober in self._mCharSetProbers[0].mProbers:
                if not prober:
                    continue
                sys.stderr.write('%s confidence = %s\n' %
                                 (prober.get_charset_name(),
                                  prober.get_confidence()))
| mit |
pkoutsias/SickRage | lib/chardet/jisfreq.py | 342 | 25777 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials include literature and computer technology
#
# Japanese frequency table, applied to both S-JIS and EUC-JP
# They are sorted in order.
# 128 --> 0.77094
# 256 --> 0.85710
# 512 --> 0.92635
# 1024 --> 0.97130
# 2048 --> 0.99431
#
# Ideal Distribution Ratio = 0.92635 / (1-0.92635) = 12.58
# Random Distribution Ratio = 512 / (2965+62+83+86-512) = 0.191
#
# Typical Distribution Ratio, 25% of IDR
JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0
# Char-to-FreqOrder table
JIS_TABLE_SIZE = 4368
JIS_CHAR_TO_FREQ_ORDER = (
40, 1, 6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, # 16
3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247, 18, 179,5071, 856,1661, # 32
1262,5072, 619, 127,3431,3512,3230,1899,1700, 232, 228,1294,1298, 284, 283,2041, # 48
2042,1061,1062, 48, 49, 44, 45, 433, 434,1040,1041, 996, 787,2997,1255,4305, # 64
2108,4609,1684,1648,5073,5074,5075,5076,5077,5078,3687,5079,4610,5080,3927,3928, # 80
5081,3296,3432, 290,2285,1471,2187,5082,2580,2825,1303,2140,1739,1445,2691,3375, # 96
1691,3297,4306,4307,4611, 452,3376,1182,2713,3688,3069,4308,5083,5084,5085,5086, # 112
5087,5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102, # 128
5103,5104,5105,5106,5107,5108,5109,5110,5111,5112,4097,5113,5114,5115,5116,5117, # 144
5118,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,5130,5131,5132,5133, # 160
5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,5149, # 176
5150,5151,5152,4612,5153,5154,5155,5156,5157,5158,5159,5160,5161,5162,5163,5164, # 192
5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,1472, 598, 618, 820,1205, # 208
1309,1412,1858,1307,1692,5176,5177,5178,5179,5180,5181,5182,1142,1452,1234,1172, # 224
1875,2043,2149,1793,1382,2973, 925,2404,1067,1241, 960,1377,2935,1491, 919,1217, # 240
1865,2030,1406,1499,2749,4098,5183,5184,5185,5186,5187,5188,2561,4099,3117,1804, # 256
2049,3689,4309,3513,1663,5189,3166,3118,3298,1587,1561,3433,5190,3119,1625,2998, # 272
3299,4613,1766,3690,2786,4614,5191,5192,5193,5194,2161, 26,3377, 2,3929, 20, # 288
3691, 47,4100, 50, 17, 16, 35, 268, 27, 243, 42, 155, 24, 154, 29, 184, # 304
4, 91, 14, 92, 53, 396, 33, 289, 9, 37, 64, 620, 21, 39, 321, 5, # 320
12, 11, 52, 13, 3, 208, 138, 0, 7, 60, 526, 141, 151,1069, 181, 275, # 336
1591, 83, 132,1475, 126, 331, 829, 15, 69, 160, 59, 22, 157, 55,1079, 312, # 352
109, 38, 23, 25, 10, 19, 79,5195, 61, 382,1124, 8, 30,5196,5197,5198, # 368
5199,5200,5201,5202,5203,5204,5205,5206, 89, 62, 74, 34,2416, 112, 139, 196, # 384
271, 149, 84, 607, 131, 765, 46, 88, 153, 683, 76, 874, 101, 258, 57, 80, # 400
32, 364, 121,1508, 169,1547, 68, 235, 145,2999, 41, 360,3027, 70, 63, 31, # 416
43, 259, 262,1383, 99, 533, 194, 66, 93, 846, 217, 192, 56, 106, 58, 565, # 432
280, 272, 311, 256, 146, 82, 308, 71, 100, 128, 214, 655, 110, 261, 104,1140, # 448
54, 51, 36, 87, 67,3070, 185,2618,2936,2020, 28,1066,2390,2059,5207,5208, # 464
5209,5210,5211,5212,5213,5214,5215,5216,4615,5217,5218,5219,5220,5221,5222,5223, # 480
5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,5235,5236,3514,5237,5238, # 496
5239,5240,5241,5242,5243,5244,2297,2031,4616,4310,3692,5245,3071,5246,3598,5247, # 512
4617,3231,3515,5248,4101,4311,4618,3808,4312,4102,5249,4103,4104,3599,5250,5251, # 528
5252,5253,5254,5255,5256,5257,5258,5259,5260,5261,5262,5263,5264,5265,5266,5267, # 544
5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,5279,5280,5281,5282,5283, # 560
5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,5294,5295,5296,5297,5298,5299, # 576
5300,5301,5302,5303,5304,5305,5306,5307,5308,5309,5310,5311,5312,5313,5314,5315, # 592
5316,5317,5318,5319,5320,5321,5322,5323,5324,5325,5326,5327,5328,5329,5330,5331, # 608
5332,5333,5334,5335,5336,5337,5338,5339,5340,5341,5342,5343,5344,5345,5346,5347, # 624
5348,5349,5350,5351,5352,5353,5354,5355,5356,5357,5358,5359,5360,5361,5362,5363, # 640
5364,5365,5366,5367,5368,5369,5370,5371,5372,5373,5374,5375,5376,5377,5378,5379, # 656
5380,5381, 363, 642,2787,2878,2788,2789,2316,3232,2317,3434,2011, 165,1942,3930, # 672
3931,3932,3933,5382,4619,5383,4620,5384,5385,5386,5387,5388,5389,5390,5391,5392, # 688
5393,5394,5395,5396,5397,5398,5399,5400,5401,5402,5403,5404,5405,5406,5407,5408, # 704
5409,5410,5411,5412,5413,5414,5415,5416,5417,5418,5419,5420,5421,5422,5423,5424, # 720
5425,5426,5427,5428,5429,5430,5431,5432,5433,5434,5435,5436,5437,5438,5439,5440, # 736
5441,5442,5443,5444,5445,5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456, # 752
5457,5458,5459,5460,5461,5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472, # 768
5473,5474,5475,5476,5477,5478,5479,5480,5481,5482,5483,5484,5485,5486,5487,5488, # 784
5489,5490,5491,5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504, # 800
5505,5506,5507,5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520, # 816
5521,5522,5523,5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536, # 832
5537,5538,5539,5540,5541,5542,5543,5544,5545,5546,5547,5548,5549,5550,5551,5552, # 848
5553,5554,5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568, # 864
5569,5570,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584, # 880
5585,5586,5587,5588,5589,5590,5591,5592,5593,5594,5595,5596,5597,5598,5599,5600, # 896
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,5615,5616, # 912
5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,5632, # 928
5633,5634,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,5647,5648, # 944
5649,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,5661,5662,5663,5664, # 960
5665,5666,5667,5668,5669,5670,5671,5672,5673,5674,5675,5676,5677,5678,5679,5680, # 976
5681,5682,5683,5684,5685,5686,5687,5688,5689,5690,5691,5692,5693,5694,5695,5696, # 992
5697,5698,5699,5700,5701,5702,5703,5704,5705,5706,5707,5708,5709,5710,5711,5712, # 1008
5713,5714,5715,5716,5717,5718,5719,5720,5721,5722,5723,5724,5725,5726,5727,5728, # 1024
5729,5730,5731,5732,5733,5734,5735,5736,5737,5738,5739,5740,5741,5742,5743,5744, # 1040
5745,5746,5747,5748,5749,5750,5751,5752,5753,5754,5755,5756,5757,5758,5759,5760, # 1056
5761,5762,5763,5764,5765,5766,5767,5768,5769,5770,5771,5772,5773,5774,5775,5776, # 1072
5777,5778,5779,5780,5781,5782,5783,5784,5785,5786,5787,5788,5789,5790,5791,5792, # 1088
5793,5794,5795,5796,5797,5798,5799,5800,5801,5802,5803,5804,5805,5806,5807,5808, # 1104
5809,5810,5811,5812,5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824, # 1120
5825,5826,5827,5828,5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840, # 1136
5841,5842,5843,5844,5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856, # 1152
5857,5858,5859,5860,5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872, # 1168
5873,5874,5875,5876,5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888, # 1184
5889,5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904, # 1200
5905,5906,5907,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920, # 1216
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936, # 1232
5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952, # 1248
5953,5954,5955,5956,5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968, # 1264
5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984, # 1280
5985,5986,5987,5988,5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000, # 1296
6001,6002,6003,6004,6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016, # 1312
6017,6018,6019,6020,6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032, # 1328
6033,6034,6035,6036,6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048, # 1344
6049,6050,6051,6052,6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064, # 1360
6065,6066,6067,6068,6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080, # 1376
6081,6082,6083,6084,6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096, # 1392
6097,6098,6099,6100,6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112, # 1408
6113,6114,2044,2060,4621, 997,1235, 473,1186,4622, 920,3378,6115,6116, 379,1108, # 1424
4313,2657,2735,3934,6117,3809, 636,3233, 573,1026,3693,3435,2974,3300,2298,4105, # 1440
854,2937,2463, 393,2581,2417, 539, 752,1280,2750,2480, 140,1161, 440, 708,1569, # 1456
665,2497,1746,1291,1523,3000, 164,1603, 847,1331, 537,1997, 486, 508,1693,2418, # 1472
1970,2227, 878,1220, 299,1030, 969, 652,2751, 624,1137,3301,2619, 65,3302,2045, # 1488
1761,1859,3120,1930,3694,3516, 663,1767, 852, 835,3695, 269, 767,2826,2339,1305, # 1504
896,1150, 770,1616,6118, 506,1502,2075,1012,2519, 775,2520,2975,2340,2938,4314, # 1520
3028,2086,1224,1943,2286,6119,3072,4315,2240,1273,1987,3935,1557, 175, 597, 985, # 1536
3517,2419,2521,1416,3029, 585, 938,1931,1007,1052,1932,1685,6120,3379,4316,4623, # 1552
804, 599,3121,1333,2128,2539,1159,1554,2032,3810, 687,2033,2904, 952, 675,1467, # 1568
3436,6121,2241,1096,1786,2440,1543,1924, 980,1813,2228, 781,2692,1879, 728,1918, # 1584
3696,4624, 548,1950,4625,1809,1088,1356,3303,2522,1944, 502, 972, 373, 513,2827, # 1600
586,2377,2391,1003,1976,1631,6122,2464,1084, 648,1776,4626,2141, 324, 962,2012, # 1616
2177,2076,1384, 742,2178,1448,1173,1810, 222, 102, 301, 445, 125,2420, 662,2498, # 1632
277, 200,1476,1165,1068, 224,2562,1378,1446, 450,1880, 659, 791, 582,4627,2939, # 1648
3936,1516,1274, 555,2099,3697,1020,1389,1526,3380,1762,1723,1787,2229, 412,2114, # 1664
1900,2392,3518, 512,2597, 427,1925,2341,3122,1653,1686,2465,2499, 697, 330, 273, # 1680
380,2162, 951, 832, 780, 991,1301,3073, 965,2270,3519, 668,2523,2636,1286, 535, # 1696
1407, 518, 671, 957,2658,2378, 267, 611,2197,3030,6123, 248,2299, 967,1799,2356, # 1712
850,1418,3437,1876,1256,1480,2828,1718,6124,6125,1755,1664,2405,6126,4628,2879, # 1728
2829, 499,2179, 676,4629, 557,2329,2214,2090, 325,3234, 464, 811,3001, 992,2342, # 1744
2481,1232,1469, 303,2242, 466,1070,2163, 603,1777,2091,4630,2752,4631,2714, 322, # 1760
2659,1964,1768, 481,2188,1463,2330,2857,3600,2092,3031,2421,4632,2318,2070,1849, # 1776
2598,4633,1302,2254,1668,1701,2422,3811,2905,3032,3123,2046,4106,1763,1694,4634, # 1792
1604, 943,1724,1454, 917, 868,2215,1169,2940, 552,1145,1800,1228,1823,1955, 316, # 1808
1080,2510, 361,1807,2830,4107,2660,3381,1346,1423,1134,4108,6127, 541,1263,1229, # 1824
1148,2540, 545, 465,1833,2880,3438,1901,3074,2482, 816,3937, 713,1788,2500, 122, # 1840
1575, 195,1451,2501,1111,6128, 859, 374,1225,2243,2483,4317, 390,1033,3439,3075, # 1856
2524,1687, 266, 793,1440,2599, 946, 779, 802, 507, 897,1081, 528,2189,1292, 711, # 1872
1866,1725,1167,1640, 753, 398,2661,1053, 246, 348,4318, 137,1024,3440,1600,2077, # 1888
2129, 825,4319, 698, 238, 521, 187,2300,1157,2423,1641,1605,1464,1610,1097,2541, # 1904
1260,1436, 759,2255,1814,2150, 705,3235, 409,2563,3304, 561,3033,2005,2564, 726, # 1920
1956,2343,3698,4109, 949,3812,3813,3520,1669, 653,1379,2525, 881,2198, 632,2256, # 1936
1027, 778,1074, 733,1957, 514,1481,2466, 554,2180, 702,3938,1606,1017,1398,6129, # 1952
1380,3521, 921, 993,1313, 594, 449,1489,1617,1166, 768,1426,1360, 495,1794,3601, # 1968
1177,3602,1170,4320,2344, 476, 425,3167,4635,3168,1424, 401,2662,1171,3382,1998, # 1984
1089,4110, 477,3169, 474,6130,1909, 596,2831,1842, 494, 693,1051,1028,1207,3076, # 2000
606,2115, 727,2790,1473,1115, 743,3522, 630, 805,1532,4321,2021, 366,1057, 838, # 2016
684,1114,2142,4322,2050,1492,1892,1808,2271,3814,2424,1971,1447,1373,3305,1090, # 2032
1536,3939,3523,3306,1455,2199, 336, 369,2331,1035, 584,2393, 902, 718,2600,6131, # 2048
2753, 463,2151,1149,1611,2467, 715,1308,3124,1268, 343,1413,3236,1517,1347,2663, # 2064
2093,3940,2022,1131,1553,2100,2941,1427,3441,2942,1323,2484,6132,1980, 872,2368, # 2080
2441,2943, 320,2369,2116,1082, 679,1933,3941,2791,3815, 625,1143,2023, 422,2200, # 2096
3816,6133, 730,1695, 356,2257,1626,2301,2858,2637,1627,1778, 937, 883,2906,2693, # 2112
3002,1769,1086, 400,1063,1325,3307,2792,4111,3077, 456,2345,1046, 747,6134,1524, # 2128
884,1094,3383,1474,2164,1059, 974,1688,2181,2258,1047, 345,1665,1187, 358, 875, # 2144
3170, 305, 660,3524,2190,1334,1135,3171,1540,1649,2542,1527, 927, 968,2793, 885, # 2160
1972,1850, 482, 500,2638,1218,1109,1085,2543,1654,2034, 876, 78,2287,1482,1277, # 2176
861,1675,1083,1779, 724,2754, 454, 397,1132,1612,2332, 893, 672,1237, 257,2259, # 2192
2370, 135,3384, 337,2244, 547, 352, 340, 709,2485,1400, 788,1138,2511, 540, 772, # 2208
1682,2260,2272,2544,2013,1843,1902,4636,1999,1562,2288,4637,2201,1403,1533, 407, # 2224
576,3308,1254,2071, 978,3385, 170, 136,1201,3125,2664,3172,2394, 213, 912, 873, # 2240
3603,1713,2202, 699,3604,3699, 813,3442, 493, 531,1054, 468,2907,1483, 304, 281, # 2256
4112,1726,1252,2094, 339,2319,2130,2639, 756,1563,2944, 748, 571,2976,1588,2425, # 2272
2715,1851,1460,2426,1528,1392,1973,3237, 288,3309, 685,3386, 296, 892,2716,2216, # 2288
1570,2245, 722,1747,2217, 905,3238,1103,6135,1893,1441,1965, 251,1805,2371,3700, # 2304
2601,1919,1078, 75,2182,1509,1592,1270,2640,4638,2152,6136,3310,3817, 524, 706, # 2320
1075, 292,3818,1756,2602, 317, 98,3173,3605,3525,1844,2218,3819,2502, 814, 567, # 2336
385,2908,1534,6137, 534,1642,3239, 797,6138,1670,1529, 953,4323, 188,1071, 538, # 2352
178, 729,3240,2109,1226,1374,2000,2357,2977, 731,2468,1116,2014,2051,6139,1261, # 2368
1593, 803,2859,2736,3443, 556, 682, 823,1541,6140,1369,2289,1706,2794, 845, 462, # 2384
2603,2665,1361, 387, 162,2358,1740, 739,1770,1720,1304,1401,3241,1049, 627,1571, # 2400
2427,3526,1877,3942,1852,1500, 431,1910,1503, 677, 297,2795, 286,1433,1038,1198, # 2416
2290,1133,1596,4113,4639,2469,1510,1484,3943,6141,2442, 108, 712,4640,2372, 866, # 2432
3701,2755,3242,1348, 834,1945,1408,3527,2395,3243,1811, 824, 994,1179,2110,1548, # 2448
1453, 790,3003, 690,4324,4325,2832,2909,3820,1860,3821, 225,1748, 310, 346,1780, # 2464
2470, 821,1993,2717,2796, 828, 877,3528,2860,2471,1702,2165,2910,2486,1789, 453, # 2480
359,2291,1676, 73,1164,1461,1127,3311, 421, 604, 314,1037, 589, 116,2487, 737, # 2496
837,1180, 111, 244, 735,6142,2261,1861,1362, 986, 523, 418, 581,2666,3822, 103, # 2512
855, 503,1414,1867,2488,1091, 657,1597, 979, 605,1316,4641,1021,2443,2078,2001, # 2528
1209, 96, 587,2166,1032, 260,1072,2153, 173, 94, 226,3244, 819,2006,4642,4114, # 2544
2203, 231,1744, 782, 97,2667, 786,3387, 887, 391, 442,2219,4326,1425,6143,2694, # 2560
633,1544,1202, 483,2015, 592,2052,1958,2472,1655, 419, 129,4327,3444,3312,1714, # 2576
1257,3078,4328,1518,1098, 865,1310,1019,1885,1512,1734, 469,2444, 148, 773, 436, # 2592
1815,1868,1128,1055,4329,1245,2756,3445,2154,1934,1039,4643, 579,1238, 932,2320, # 2608
353, 205, 801, 115,2428, 944,2321,1881, 399,2565,1211, 678, 766,3944, 335,2101, # 2624
1459,1781,1402,3945,2737,2131,1010, 844, 981,1326,1013, 550,1816,1545,2620,1335, # 2640
1008, 371,2881, 936,1419,1613,3529,1456,1395,2273,1834,2604,1317,2738,2503, 416, # 2656
1643,4330, 806,1126, 229, 591,3946,1314,1981,1576,1837,1666, 347,1790, 977,3313, # 2672
764,2861,1853, 688,2429,1920,1462, 77, 595, 415,2002,3034, 798,1192,4115,6144, # 2688
2978,4331,3035,2695,2582,2072,2566, 430,2430,1727, 842,1396,3947,3702, 613, 377, # 2704
278, 236,1417,3388,3314,3174, 757,1869, 107,3530,6145,1194, 623,2262, 207,1253, # 2720
2167,3446,3948, 492,1117,1935, 536,1838,2757,1246,4332, 696,2095,2406,1393,1572, # 2736
3175,1782, 583, 190, 253,1390,2230, 830,3126,3389, 934,3245,1703,1749,2979,1870, # 2752
2545,1656,2204, 869,2346,4116,3176,1817, 496,1764,4644, 942,1504, 404,1903,1122, # 2768
1580,3606,2945,1022, 515, 372,1735, 955,2431,3036,6146,2797,1110,2302,2798, 617, # 2784
6147, 441, 762,1771,3447,3607,3608,1904, 840,3037, 86, 939,1385, 572,1370,2445, # 2800
1336, 114,3703, 898, 294, 203,3315, 703,1583,2274, 429, 961,4333,1854,1951,3390, # 2816
2373,3704,4334,1318,1381, 966,1911,2322,1006,1155, 309, 989, 458,2718,1795,1372, # 2832
1203, 252,1689,1363,3177, 517,1936, 168,1490, 562, 193,3823,1042,4117,1835, 551, # 2848
470,4645, 395, 489,3448,1871,1465,2583,2641, 417,1493, 279,1295, 511,1236,1119, # 2864
72,1231,1982,1812,3004, 871,1564, 984,3449,1667,2696,2096,4646,2347,2833,1673, # 2880
3609, 695,3246,2668, 807,1183,4647, 890, 388,2333,1801,1457,2911,1765,1477,1031, # 2896
3316,3317,1278,3391,2799,2292,2526, 163,3450,4335,2669,1404,1802,6148,2323,2407, # 2912
1584,1728,1494,1824,1269, 298, 909,3318,1034,1632, 375, 776,1683,2061, 291, 210, # 2928
1123, 809,1249,1002,2642,3038, 206,1011,2132, 144, 975, 882,1565, 342, 667, 754, # 2944
1442,2143,1299,2303,2062, 447, 626,2205,1221,2739,2912,1144,1214,2206,2584, 760, # 2960
1715, 614, 950,1281,2670,2621, 810, 577,1287,2546,4648, 242,2168, 250,2643, 691, # 2976
123,2644, 647, 313,1029, 689,1357,2946,1650, 216, 771,1339,1306, 808,2063, 549, # 2992
913,1371,2913,2914,6149,1466,1092,1174,1196,1311,2605,2396,1783,1796,3079, 406, # 3008
2671,2117,3949,4649, 487,1825,2220,6150,2915, 448,2348,1073,6151,2397,1707, 130, # 3024
900,1598, 329, 176,1959,2527,1620,6152,2275,4336,3319,1983,2191,3705,3610,2155, # 3040
3706,1912,1513,1614,6153,1988, 646, 392,2304,1589,3320,3039,1826,1239,1352,1340, # 3056
2916, 505,2567,1709,1437,2408,2547, 906,6154,2672, 384,1458,1594,1100,1329, 710, # 3072
423,3531,2064,2231,2622,1989,2673,1087,1882, 333, 841,3005,1296,2882,2379, 580, # 3088
1937,1827,1293,2585, 601, 574, 249,1772,4118,2079,1120, 645, 901,1176,1690, 795, # 3104
2207, 478,1434, 516,1190,1530, 761,2080, 930,1264, 355, 435,1552, 644,1791, 987, # 3120
220,1364,1163,1121,1538, 306,2169,1327,1222, 546,2645, 218, 241, 610,1704,3321, # 3136
1984,1839,1966,2528, 451,6155,2586,3707,2568, 907,3178, 254,2947, 186,1845,4650, # 3152
745, 432,1757, 428,1633, 888,2246,2221,2489,3611,2118,1258,1265, 956,3127,1784, # 3168
4337,2490, 319, 510, 119, 457,3612, 274,2035,2007,4651,1409,3128, 970,2758, 590, # 3184
2800, 661,2247,4652,2008,3950,1420,1549,3080,3322,3951,1651,1375,2111, 485,2491, # 3200
1429,1156,6156,2548,2183,1495, 831,1840,2529,2446, 501,1657, 307,1894,3247,1341, # 3216
666, 899,2156,1539,2549,1559, 886, 349,2208,3081,2305,1736,3824,2170,2759,1014, # 3232
1913,1386, 542,1397,2948, 490, 368, 716, 362, 159, 282,2569,1129,1658,1288,1750, # 3248
2674, 276, 649,2016, 751,1496, 658,1818,1284,1862,2209,2087,2512,3451, 622,2834, # 3264
376, 117,1060,2053,1208,1721,1101,1443, 247,1250,3179,1792,3952,2760,2398,3953, # 3280
6157,2144,3708, 446,2432,1151,2570,3452,2447,2761,2835,1210,2448,3082, 424,2222, # 3296
1251,2449,2119,2836, 504,1581,4338, 602, 817, 857,3825,2349,2306, 357,3826,1470, # 3312
1883,2883, 255, 958, 929,2917,3248, 302,4653,1050,1271,1751,2307,1952,1430,2697, # 3328
2719,2359, 354,3180, 777, 158,2036,4339,1659,4340,4654,2308,2949,2248,1146,2232, # 3344
3532,2720,1696,2623,3827,6158,3129,1550,2698,1485,1297,1428, 637, 931,2721,2145, # 3360
914,2550,2587, 81,2450, 612, 827,2646,1242,4655,1118,2884, 472,1855,3181,3533, # 3376
3534, 569,1353,2699,1244,1758,2588,4119,2009,2762,2171,3709,1312,1531,6159,1152, # 3392
1938, 134,1830, 471,3710,2276,1112,1535,3323,3453,3535, 982,1337,2950, 488, 826, # 3408
674,1058,1628,4120,2017, 522,2399, 211, 568,1367,3454, 350, 293,1872,1139,3249, # 3424
1399,1946,3006,1300,2360,3324, 588, 736,6160,2606, 744, 669,3536,3828,6161,1358, # 3440
199, 723, 848, 933, 851,1939,1505,1514,1338,1618,1831,4656,1634,3613, 443,2740, # 3456
3829, 717,1947, 491,1914,6162,2551,1542,4121,1025,6163,1099,1223, 198,3040,2722, # 3472
370, 410,1905,2589, 998,1248,3182,2380, 519,1449,4122,1710, 947, 928,1153,4341, # 3488
2277, 344,2624,1511, 615, 105, 161,1212,1076,1960,3130,2054,1926,1175,1906,2473, # 3504
414,1873,2801,6164,2309, 315,1319,3325, 318,2018,2146,2157, 963, 631, 223,4342, # 3520
4343,2675, 479,3711,1197,2625,3712,2676,2361,6165,4344,4123,6166,2451,3183,1886, # 3536
2184,1674,1330,1711,1635,1506, 799, 219,3250,3083,3954,1677,3713,3326,2081,3614, # 3552
1652,2073,4657,1147,3041,1752, 643,1961, 147,1974,3955,6167,1716,2037, 918,3007, # 3568
1994, 120,1537, 118, 609,3184,4345, 740,3455,1219, 332,1615,3830,6168,1621,2980, # 3584
1582, 783, 212, 553,2350,3714,1349,2433,2082,4124, 889,6169,2310,1275,1410, 973, # 3600
166,1320,3456,1797,1215,3185,2885,1846,2590,2763,4658, 629, 822,3008, 763, 940, # 3616
1990,2862, 439,2409,1566,1240,1622, 926,1282,1907,2764, 654,2210,1607, 327,1130, # 3632
3956,1678,1623,6170,2434,2192, 686, 608,3831,3715, 903,3957,3042,6171,2741,1522, # 3648
1915,1105,1555,2552,1359, 323,3251,4346,3457, 738,1354,2553,2311,2334,1828,2003, # 3664
3832,1753,2351,1227,6172,1887,4125,1478,6173,2410,1874,1712,1847, 520,1204,2607, # 3680
264,4659, 836,2677,2102, 600,4660,3833,2278,3084,6174,4347,3615,1342, 640, 532, # 3696
543,2608,1888,2400,2591,1009,4348,1497, 341,1737,3616,2723,1394, 529,3252,1321, # 3712
983,4661,1515,2120, 971,2592, 924, 287,1662,3186,4349,2700,4350,1519, 908,1948, # 3728
2452, 156, 796,1629,1486,2223,2055, 694,4126,1259,1036,3392,1213,2249,2742,1889, # 3744
1230,3958,1015, 910, 408, 559,3617,4662, 746, 725, 935,4663,3959,3009,1289, 563, # 3760
867,4664,3960,1567,2981,2038,2626, 988,2263,2381,4351, 143,2374, 704,1895,6175, # 3776
1188,3716,2088, 673,3085,2362,4352, 484,1608,1921,2765,2918, 215, 904,3618,3537, # 3792
894, 509, 976,3043,2701,3961,4353,2837,2982, 498,6176,6177,1102,3538,1332,3393, # 3808
1487,1636,1637, 233, 245,3962, 383, 650, 995,3044, 460,1520,1206,2352, 749,3327, # 3824
530, 700, 389,1438,1560,1773,3963,2264, 719,2951,2724,3834, 870,1832,1644,1000, # 3840
839,2474,3717, 197,1630,3394, 365,2886,3964,1285,2133, 734, 922, 818,1106, 732, # 3856
480,2083,1774,3458, 923,2279,1350, 221,3086, 85,2233,2234,3835,1585,3010,2147, # 3872
1387,1705,2382,1619,2475, 133, 239,2802,1991,1016,2084,2383, 411,2838,1113, 651, # 3888
1985,1160,3328, 990,1863,3087,1048,1276,2647, 265,2627,1599,3253,2056, 150, 638, # 3904
2019, 656, 853, 326,1479, 680,1439,4354,1001,1759, 413,3459,3395,2492,1431, 459, # 3920
4355,1125,3329,2265,1953,1450,2065,2863, 849, 351,2678,3131,3254,3255,1104,1577, # 3936
227,1351,1645,2453,2193,1421,2887, 812,2121, 634, 95,2435, 201,2312,4665,1646, # 3952
1671,2743,1601,2554,2702,2648,2280,1315,1366,2089,3132,1573,3718,3965,1729,1189, # 3968
328,2679,1077,1940,1136, 558,1283, 964,1195, 621,2074,1199,1743,3460,3619,1896, # 3984
1916,1890,3836,2952,1154,2112,1064, 862, 378,3011,2066,2113,2803,1568,2839,6178, # 4000
3088,2919,1941,1660,2004,1992,2194, 142, 707,1590,1708,1624,1922,1023,1836,1233, # 4016
1004,2313, 789, 741,3620,6179,1609,2411,1200,4127,3719,3720,4666,2057,3721, 593, # 4032
2840, 367,2920,1878,6180,3461,1521, 628,1168, 692,2211,2649, 300, 720,2067,2571, # 4048
2953,3396, 959,2504,3966,3539,3462,1977, 701,6181, 954,1043, 800, 681, 183,3722, # 4064
1803,1730,3540,4128,2103, 815,2314, 174, 467, 230,2454,1093,2134, 755,3541,3397, # 4080
1141,1162,6182,1738,2039, 270,3256,2513,1005,1647,2185,3837, 858,1679,1897,1719, # 4096
2954,2324,1806, 402, 670, 167,4129,1498,2158,2104, 750,6183, 915, 189,1680,1551, # 4112
455,4356,1501,2455, 405,1095,2955, 338,1586,1266,1819, 570, 641,1324, 237,1556, # 4128
2650,1388,3723,6184,1368,2384,1343,1978,3089,2436, 879,3724, 792,1191, 758,3012, # 4144
1411,2135,1322,4357, 240,4667,1848,3725,1574,6185, 420,3045,1546,1391, 714,4358, # 4160
1967, 941,1864, 863, 664, 426, 560,1731,2680,1785,2864,1949,2363, 403,3330,1415, # 4176
1279,2136,1697,2335, 204, 721,2097,3838, 90,6186,2085,2505, 191,3967, 124,2148, # 4192
1376,1798,1178,1107,1898,1405, 860,4359,1243,1272,2375,2983,1558,2456,1638, 113, # 4208
3621, 578,1923,2609, 880, 386,4130, 784,2186,2266,1422,2956,2172,1722, 497, 263, # 4224
2514,1267,2412,2610, 177,2703,3542, 774,1927,1344, 616,1432,1595,1018, 172,4360, # 4240
2325, 911,4361, 438,1468,3622, 794,3968,2024,2173,1681,1829,2957, 945, 895,3090, # 4256
575,2212,2476, 475,2401,2681, 785,2744,1745,2293,2555,1975,3133,2865, 394,4668, # 4272
3839, 635,4131, 639, 202,1507,2195,2766,1345,1435,2572,3726,1908,1184,1181,2457, # 4288
3727,3134,4362, 843,2611, 437, 916,4669, 234, 769,1884,3046,3047,3623, 833,6187, # 4304
1639,2250,2402,1355,1185,2010,2047, 999, 525,1732,1290,1488,2612, 948,1578,3728, # 4320
2413,2477,1216,2725,2159, 334,3840,1328,3624,2921,1525,4132, 564,1056, 891,4363, # 4336
1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767, # 4352
2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137, # 4368 #last 512
)
| gpl-3.0 |
waseem18/oh-mainline | vendor/packages/Pygments/pygments/formatters/_mapping.py | 263 | 5508 | # -*- coding: utf-8 -*-
"""
pygments.formatters._mapping
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Formatter mapping definitions. This file is generated by itself. Every time
    you change something on a builtin formatter definition, run this script from
the formatters folder to update it.
Do not alter the FORMATTERS dictionary by hand.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# start
from pygments.formatters.bbcode import BBCodeFormatter
from pygments.formatters.html import HtmlFormatter
from pygments.formatters.img import BmpImageFormatter
from pygments.formatters.img import GifImageFormatter
from pygments.formatters.img import ImageFormatter
from pygments.formatters.img import JpgImageFormatter
from pygments.formatters.latex import LatexFormatter
from pygments.formatters.other import NullFormatter
from pygments.formatters.other import RawTokenFormatter
from pygments.formatters.rtf import RtfFormatter
from pygments.formatters.svg import SvgFormatter
from pygments.formatters.terminal import TerminalFormatter
from pygments.formatters.terminal256 import Terminal256Formatter
# Maps each formatter class to a 4-tuple of metadata, in the order emitted by
# the regeneration script below:
#   (display name, alias names, filename glob patterns, docstring headline)
# Generated content -- regenerate by running this module; do not edit by hand.
FORMATTERS = {
    BBCodeFormatter: ('BBCode', ('bbcode', 'bb'), (), 'Format tokens with BBcodes. These formatting codes are used by many bulletin boards, so you can highlight your sourcecode with pygments before posting it there.'),
    BmpImageFormatter: ('img_bmp', ('bmp', 'bitmap'), ('*.bmp',), 'Create a bitmap image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
    GifImageFormatter: ('img_gif', ('gif',), ('*.gif',), 'Create a GIF image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
    HtmlFormatter: ('HTML', ('html',), ('*.html', '*.htm'), "Format tokens as HTML 4 ``<span>`` tags within a ``<pre>`` tag, wrapped in a ``<div>`` tag. The ``<div>``'s CSS class can be set by the `cssclass` option."),
    ImageFormatter: ('img', ('img', 'IMG', 'png'), ('*.png',), 'Create a PNG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
    JpgImageFormatter: ('img_jpg', ('jpg', 'jpeg'), ('*.jpg',), 'Create a JPEG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
    LatexFormatter: ('LaTeX', ('latex', 'tex'), ('*.tex',), 'Format tokens as LaTeX code. This needs the `fancyvrb` and `color` standard packages.'),
    NullFormatter: ('Text only', ('text', 'null'), ('*.txt',), 'Output the text unchanged without any formatting.'),
    RawTokenFormatter: ('Raw tokens', ('raw', 'tokens'), ('*.raw',), 'Format tokens as a raw representation for storing token streams.'),
    RtfFormatter: ('RTF', ('rtf',), ('*.rtf',), 'Format tokens as RTF markup. This formatter automatically outputs full RTF documents with color information and other useful stuff. Perfect for Copy and Paste into Microsoft\xc2\xae Word\xc2\xae documents.'),
    SvgFormatter: ('SVG', ('svg',), ('*.svg',), 'Format tokens as an SVG graphics file. This formatter is still experimental. Each line of code is a ``<text>`` element with explicit ``x`` and ``y`` coordinates containing ``<tspan>`` elements with the individual token styles.'),
    Terminal256Formatter: ('Terminal256', ('terminal256', 'console256', '256'), (), 'Format tokens with ANSI color sequences, for output in a 256-color terminal or console. Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'),
    TerminalFormatter: ('Terminal', ('terminal', 'console'), (), 'Format tokens with ANSI color sequences, for output in a text console. Color sequences are terminated at newlines, so that paging the output works correctly.')
}
if __name__ == '__main__':
import sys
import os
# lookup formatters
found_formatters = []
imports = []
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from pygments.util import docstring_headline
for filename in os.listdir('.'):
if filename.endswith('.py') and not filename.startswith('_'):
module_name = 'pygments.formatters.%s' % filename[:-3]
print module_name
module = __import__(module_name, None, None, [''])
for formatter_name in module.__all__:
imports.append((module_name, formatter_name))
formatter = getattr(module, formatter_name)
found_formatters.append(
'%s: %r' % (formatter_name,
(formatter.name,
tuple(formatter.aliases),
tuple(formatter.filenames),
docstring_headline(formatter))))
# sort them, that should make the diff files for svn smaller
found_formatters.sort()
imports.sort()
# extract useful sourcecode from this file
f = open(__file__)
try:
content = f.read()
finally:
f.close()
header = content[:content.find('# start')]
footer = content[content.find("if __name__ == '__main__':"):]
# write new file
f = open(__file__, 'w')
f.write(header)
f.write('# start\n')
f.write('\n'.join(['from %s import %s' % imp for imp in imports]))
f.write('\n\n')
f.write('FORMATTERS = {\n %s\n}\n\n' % ',\n '.join(found_formatters))
f.write(footer)
f.close()
| agpl-3.0 |
CI-WATER/portal | src/ckanext-uebpackage/ckanext/uebpackage/controllers/packagecreate.py | 1 | 127106 | import ckan.lib.base as base
import logging
import ckan.plugins as p
from ckan.lib.helpers import json
import ckan.lib.uploader as uploader
import ckan.lib.munge as munge
from datetime import datetime
import os
import shutil
import httplib
from .. import helpers as uebhelper
tk = p.toolkit
_ = tk._ # translator function
log = logging.getLogger('ckan.logic')
class PackagecreateController(base.BaseController):
    """Controller driving the nine-stage UEB model-package creation wizard.

    Renders each stage's form, validates per-stage input (cached in the
    beaker session), submits the final build request to the app server,
    and exposes AJAX endpoints for build-status checks and retrieval of
    the generated model input package.
    """

    def packagecreateform(self):
        """Render stage 1 of the wizard pre-populated with default values."""
        tk.c.form_stage = 'stage_1'
        _set_context_to_shape_file_resources()
        # set default values for stage_1
        errors = {}
        data = {}
        data['domainfiletypeoption'] = 'polygon'
        data['domainshapefile'] = None
        data['startdate'] = '01/01/2011'
        data['enddate'] = '12/31/2011'
        data['buffersize'] = 500
        data['gridcellsize'] = 100
        error_summary = {}
        # one state flag per wizard stage; stage 1 starts active
        stages = ['active', 'inactive', 'inactive', 'inactive', 'inactive', 'inactive', 'inactive',
                  'inactive', 'inactive']
        form_vars = {'data': data, 'errors': errors, 'error_summary': error_summary, 'stages': stages}
        return tk.render('packagecreateform.html', extra_vars=form_vars)

    #@validate(schema=TestFormSchema(), form='form', post_only=False, on_get=True)
    def submit(self):
        """Handle a wizard page submission.

        Validates the posted stage, then either re-renders it with errors,
        moves forward/backward ('next'/'prev'), jumps to a stage being
        edited ('edit_N' buttons), shows the confirmation summary
        ('submit'), or sends the final build request to the app server
        ('confirm').
        """
        form_stage = tk.request.params['form_stage']
        if form_stage != 'stage_confirm':
            form_vars = _validate_form()
            if form_vars['error_summary']:
                # validation failed: restore the pick-lists the stage's
                # template needs, then re-render the same stage
                if form_stage == 'stage_1':
                    _set_context_to_shape_file_resources()
                if form_stage in ['stage_2', 'stage_8', 'stage_9']:
                    tk.c.ueb_dat_files = _set_context_to_file_resources('dat')
                if form_stage in ['stage_3', 'stage_4', 'stage_5', 'stage_6', 'stage_8']:
                    tk.c.ueb_nc_files = _set_context_to_file_resources('nc')
                return tk.render('packagecreateform.html', extra_vars=form_vars)
        session = base.session
        # map the posted stage name to its number
        if form_stage == 'stage_1':
            form_stage_number = 1
        elif form_stage == 'stage_2':
            form_stage_number = 2
        elif form_stage == 'stage_3':
            form_stage_number = 3
        elif form_stage == 'stage_4':
            form_stage_number = 4
        elif form_stage == 'stage_5':
            form_stage_number = 5
        elif form_stage == 'stage_6':
            form_stage_number = 6
        elif form_stage == 'stage_7':
            form_stage_number = 7
        elif form_stage == 'stage_8':
            form_stage_number = 8
        else:
            form_stage_number = 9
        if "submit" in tk.request.params:
            # show the confirmation page summarising everything selected
            # NOTE(review): form_vars is only bound above when
            # form_stage != 'stage_confirm' — presumably 'submit' is never
            # posted from the confirm page itself; verify against template
            request_data_in_json = _get_package_request_in_json_format()
            data = {}
            tk.c.selected_data = request_data_in_json
            tk.c.form_stage = 'stage_confirm'
            form_vars['data'] = data
            form_vars['stages'] = {}
            return tk.render('packagecreateform.html', extra_vars=form_vars)
        elif 'confirm' in tk.request.params:
            return _process_ueb_pkg_request_submit()
            # return 'Your UEB model package request is now in a queue for processing.'
        elif 'next' in tk.request.params:
            form_stage_number += 1
        elif "prev" in tk.request.params:
            form_stage_number -= 1
        else:  # 'edit' in tk.request.params:
            # find which of the edit_1..edit_9 buttons was clicked
            edit_stages = range(1, 10)
            for stage in edit_stages:
                edit_btn_name = 'edit_%d' % stage
                if edit_btn_name in tk.request.params:
                    form_stage_number = stage
                    break
        errors = {}
        data = {}
        error_summary = {}
        stages = []
        form_vars = {'data': data, 'errors': errors, 'error_summary': error_summary, 'stages': stages}
        form_stage = "stage_" + str(form_stage_number)
        # reuse previously entered values for the destination stage if any
        if form_stage in session:
            data = session[form_stage]
        else:
            data = _get_default_data(form_stage)
        tk.c.form_stage = form_stage
        # restore the pick-lists the destination stage needs and mark it active
        if form_stage_number == 1:
            _set_context_to_shape_file_resources()
            stages = ['active', 'inactive', 'inactive', 'inactive', 'inactive', 'inactive', 'inactive', 'inactive',
                      'inactive']
        elif form_stage_number == 2:
            tk.c.ueb_dat_files = _set_context_to_file_resources('dat')
            stages = ['inactive', 'active', 'inactive', 'inactive', 'inactive', 'inactive', 'inactive', 'inactive',
                      'inactive']
        elif form_stage_number == 3:
            tk.c.ueb_nc_files = _set_context_to_file_resources('nc')
            stages = ['inactive', 'inactive', 'active', 'inactive', 'inactive', 'inactive', 'inactive', 'inactive',
                      'inactive']
        elif form_stage_number == 4:
            tk.c.ueb_nc_files = _set_context_to_file_resources('nc')
            stages = ['inactive', 'inactive', 'inactive', 'active', 'inactive', 'inactive', 'inactive', 'inactive',
                      'inactive']
        elif form_stage_number == 5:
            tk.c.ueb_nc_files = _set_context_to_file_resources('nc')
            stages = ['inactive', 'inactive', 'inactive', 'inactive', 'active', 'inactive', 'inactive', 'inactive',
                      'inactive']
        elif form_stage_number == 6:
            tk.c.ueb_nc_files = _set_context_to_file_resources('nc')
            stages = ['inactive', 'inactive', 'inactive', 'inactive', 'inactive', 'active', 'inactive', 'inactive',
                      'inactive']
        elif form_stage_number == 7:
            stages = ['inactive', 'inactive', 'inactive', 'inactive', 'inactive', 'inactive', 'active', 'inactive',
                      'inactive']
        elif form_stage_number == 8:
            tk.c.ueb_dat_files = _set_context_to_file_resources('dat')
            tk.c.ueb_nc_files = _set_context_to_file_resources('nc')
            stages = ['inactive', 'inactive', 'inactive', 'inactive', 'inactive', 'inactive', 'inactive', 'active',
                      'inactive']
        else:
            tk.c.ueb_dat_files = _set_context_to_file_resources('dat')
            stages = ['inactive', 'inactive', 'inactive', 'inactive', 'inactive', 'inactive', 'inactive', 'inactive',
                      'active']
        form_vars['data'] = data
        form_vars['stages'] = stages
        return tk.render('packagecreateform.html', extra_vars=form_vars)

    def check_package_build_status(self, pkg_id):
        """
        Retrievs the status of the model package build process from the app server for a given model configuration
        dataset
        @param pkg_id: id of the model configuration dataset for which the package build status to be obtained
        @return: ajax_response object
        """
        source = 'uebpackage.packagecreate.check_package_build_status():'
        service_host_address = uebhelper.StringSettings.app_server_host_address
        service_request_api_url = uebhelper.StringSettings.app_server_api_check_ueb_package_build_status
        connection = httplib.HTTPConnection(service_host_address)
        package = uebhelper.get_package(pkg_id)
        ajax_response = uebhelper.AJAXResponse()
        # default outcome unless the dataset qualifies for a status check
        ajax_response.success = False
        ajax_response.message = "Not a valid package for status check"
        if package['type'] == 'model-configuration':
            if package.get('processing_status', None):
                if package['processing_status'] == 'In Queue' or package['processing_status'] == 'Processing':
                    pkg_process_job_id = package.get('package_build_request_job_id', None)
                    pkg_current_processing_status = package.get('processing_status', None)
                    if pkg_process_job_id:
                        # ask the app server for the job's current status
                        service_request_url = service_request_api_url + '?packageID=' + pkg_process_job_id
                        connection.request('GET', service_request_url)
                        service_call_results = connection.getresponse()
                        if service_call_results.status == httplib.OK:
                            request_processing_status = service_call_results.read()
                            log.info(source + 'UEB model package build status as returned from App server '
                                              'for PackageJobID:%s is %s' % (pkg_process_job_id,
                                                                             request_processing_status))
                        else:
                            request_processing_status = uebhelper.StringSettings.app_server_job_status_error
                            log.error(source + 'HTTP status %d returned from App server when checking '
                                               'status for PackageJobID:%s' % (service_call_results.status, pkg_process_job_id))
                            ajax_response.success = False
                            ajax_response.message = "Error in checking status"
                        connection.close()
                        # update the dataset if the status has changed
                        if pkg_current_processing_status != request_processing_status:
                            if request_processing_status == 'Success':
                                data_dict = {'processing_status': request_processing_status,
                                             'package_availability': uebhelper.StringSettings.app_server_job_status_package_ready_to_retrieve}
                            else:
                                data_dict = {'processing_status': request_processing_status}
                            uebhelper.update_package(pkg_id, data_dict, backgroundTask=False)
                        # NOTE(review): these assignments also run after the
                        # error branch above, overwriting its failure
                        # message — confirm that is intended
                        ajax_response.success = True
                        ajax_response.message = "Status check was successful"
                        ajax_response.json_data = request_processing_status
        return ajax_response.to_json()

    def retrieve_input_package(self, pkg_id):
        """
        Retrieves the model input package from the app server and saves it as a new dataset
        @param pkg_id: id of the model configuration dataset for which the model input package to be retrieved
        @rtype: ajax_response object
        """
        source = 'uebpackage.packagecreate.retrieve_input_package():'
        service_host_address = uebhelper.StringSettings.app_server_host_address
        service_request_api_url = uebhelper.StringSettings.app_server_api_get_ueb_package_url
        connection = httplib.HTTPConnection(service_host_address)
        package = uebhelper.get_package(pkg_id)
        ajax_response = uebhelper.AJAXResponse()
        # default outcome unless the dataset qualifies for retrieval
        ajax_response.success = False
        ajax_response.message = "Not a valid UEB model configuration datatset for model package retrieval"
        if package['type'] == 'model-configuration':
            if package.get('processing_status', None):
                if package['package_availability'] == 'Ready to retrieve':
                    pkg_process_job_id = package.get('package_build_request_job_id', None)
                    pkg_current_availability_status = package.get('package_availability', None)
                    if pkg_process_job_id:
                        # fetch the generated package from the app server
                        service_request_url = service_request_api_url + '?packageID=' + pkg_process_job_id
                        connection.request('GET', service_request_url)
                        service_call_results = connection.getresponse()
                        if service_call_results.status == httplib.OK:
                            log.info(source + 'UEB model package was received from App server for PackageJobID:%s' % pkg_process_job_id)
                            try:
                                _save_ueb_package_as_dataset(service_call_results, pkg_id)
                                pkg_availability_status = uebhelper.StringSettings.app_server_job_status_package_available
                                ajax_response.success = True
                                ajax_response.message = "Model package retrieval was successful"
                                ajax_response.json_data = pkg_availability_status
                            except Exception as e:
                                log.error(source + 'Failed to save ueb model package as a new dataset '
                                                   'for model configuration dataset ID:%s\nException:%s' % (pkg_id, e))
                                pkg_availability_status = uebhelper.StringSettings.app_server_job_status_error
                                ajax_response.success = False
                                ajax_response.message = "Failed to save the retrieved model package"
                                ajax_response.json_data = pkg_availability_status
                        else:
                            log.error(source + 'HTTP status %d returned from App server when retrieving '
                                               'UEB model package for PackageJobID:'
                                               '%s' % (service_call_results.status, pkg_process_job_id))
                            pkg_availability_status = uebhelper.StringSettings.app_server_job_status_error
                            ajax_response.success = False
                            ajax_response.message = "Error in retrieving the model package"
                            ajax_response.json_data = pkg_availability_status
                        connection.close()
                        # update the related model-configuration dataset status
                        data_dict = {'package_availability': pkg_availability_status}
                        update_msg = 'updated package availability status'
                        background_task = False
                        if pkg_current_availability_status != pkg_availability_status:
                            try:
                                updated_package = uebhelper.update_package(pkg_id, data_dict, update_msg,
                                                                           background_task)
                                log.info(source + 'UEB model configuration dataset was updated as a result of '
                                                  'receiving model input package for dataset:%s'
                                                  % updated_package['name'])
                            except Exception as e:
                                # best effort: retrieval already succeeded, so
                                # only log a failed status update
                                log.error(source + 'Failed to update UEB model configuration dataset after '
                                                   'receiving model input package for dataset ID:%s \n'
                                                   'Exception: %s' % (pkg_id, e))
                                pass
        return ajax_response.to_json()
def _save_ueb_package_as_dataset(service_call_results, model_config_dataset_id):
    """
    Saves the model input package obtained for the app server as a new dataset
    @param service_call_results: service response obtained from the app server
    @param model_config_dataset_id: id of the model configuration dataset for which the generated model package to be
    retrieved.
    @return: None
    @raise Exception: re-raises any failure while writing the zip file,
    creating the dataset, updating it, or updating the related
    model-configuration dataset
    """
    source = 'uebpackage.packagecreate._save_ueb_package_as_dataset():'
    ckan_default_dir = uebhelper.StringSettings.ckan_user_session_temp_dir  # '/tmp/ckan'
    # get the matching model configuration dataset object
    model_config_dataset_obj = base.model.Package.get(model_config_dataset_id)
    model_config_dataset_title = model_config_dataset_obj.title
    model_config_dataset_owner_org = model_config_dataset_obj.owner_org
    model_config_dataset_author = model_config_dataset_obj.author
    # create a directory for saving the file
    # this will be a dir in the form of: /tmp/ckan/{random_id}
    random_id = base.model.types.make_uuid()
    destination_dir = os.path.join(ckan_default_dir, random_id)
    os.makedirs(destination_dir)
    model_pkg_filename = uebhelper.StringSettings.ueb_input_model_package_default_filename  # 'ueb_model_pkg.zip'
    model_pkg_file = os.path.join(destination_dir, model_pkg_filename)
    # stream the zip payload from the app-server response to disk in 16 KB chunks
    bytes_to_read = 16 * 1024
    try:
        with open(model_pkg_file, 'wb') as file_obj:
            while True:
                data = service_call_results.read(bytes_to_read)
                if not data:
                    break
                file_obj.write(data)
    except Exception as e:
        log.error(source + 'Failed to save the ueb_package zip file to temporary '
                           'location for UEB model configuration dataset ID: %s \n '
                           'Exception: %s' % (model_config_dataset_id, e))
        raise e
    log.info(source + 'ueb_package zip file was saved to temporary location for '
                      'UEB model configuration dataset ID: %s' % model_config_dataset_id)
    user = uebhelper.get_site_user()
    # create a package
    package_create_action = tk.get_action('package_create')
    # create unique package name using the current time stamp as a postfix to any package name
    unique_postfix = datetime.now().isoformat().replace(':', '-').replace('.', '-').lower()
    pkg_title = model_config_dataset_title
    data_dict = {
        'name': 'model_package_' + unique_postfix,  # this needs to be unique as required by DB
        'type': 'model-package',  # dataset type as defined in custom dataset plugin
        'title': pkg_title,
        'owner_org': model_config_dataset_owner_org,
        'author': model_config_dataset_author,
        'notes': 'UEB model package',
        'pkg_model_name': 'UEB',
        'model_version': '1.0',
        'north_extent': '',
        'south_extent': '',
        'east_extent': '',
        'west_extent': '',
        'simulation_start_day': '',
        'simulation_end_day': '',
        'time_step': '',
        'package_type': u'Input',
        'package_run_status': 'Not yet submitted',
        'package_run_job_id': '',
        'dataset_type': 'model-package'
    }
    # act as the site user so the dataset can be created regardless of the
    # requesting user's permissions
    context = {'model': base.model, 'session': base.model.Session, 'ignore_auth': True, 'user': user.get('name'), 'save': 'save'}
    try:
        pkg_dict = package_create_action(context, data_dict)
        log.info(source + 'A new dataset was created for UEB input model package with name: %s' % data_dict['title'])
    except Exception as e:
        log.error(source + 'Failed to create a new dataset for ueb input model package for'
                           ' the related model configuration dataset title: %s \n Exception: %s' % (pkg_title, e))
        raise e
    if not 'resources' in pkg_dict:
        pkg_dict['resources'] = []
    try:
        # attach the zip file to the new dataset as an uploaded resource
        file_name = model_pkg_filename  # munge.munge_filename(model_pkg_filename)
        resource = {'url': file_name, 'url_type': 'upload'}
        upload = uploader.ResourceUpload(resource)
        upload.filename = file_name
        # NOTE(review): zip opened in text mode 'r'; 'rb' looks safer for
        # binary content — confirm before changing
        upload.upload_file = open(model_pkg_file, 'r')
        data_dict = {'format': 'zip', 'name': file_name, 'url': file_name, 'url_type': 'upload'}
        pkg_dict['resources'].append(data_dict)
    except Exception as e:
        # NOTE(review): this failure is only logged, yet 'upload' is used
        # unconditionally below and would then be unbound — confirm intended
        log.error(source + ' Failed to save the model package zip file as'
                           ' a resource.\n Exception: %s' % e)
    try:
        context['defer_commit'] = True
        context['use_cache'] = False
        # update the package
        package_update_action = tk.get_action('package_update')
        package_update_action(context, pkg_dict)
        context.pop('defer_commit')
    except Exception as e:
        log.error(source + ' Failed to update the new dataset for adding the input model package zip file as'
                           ' a resource.\n Exception: %s' % e)
        raise e
    # Get out resource_id resource from model as it will not appear in
    # package_show until after commit
    upload.upload(context['package'].resources[-1].id, uploader.get_max_resource_size())
    base.model.repo.commit()
    # update the related model configuration dataset to show that the package is available
    data_dict = {'package_availability': 'Available'}
    update_msg = 'system auto updated ueb package dataset'
    background_task = False
    try:
        updated_package = uebhelper.update_package(model_config_dataset_id, data_dict, update_msg, background_task)
        log.info(source + 'UEB model configuration dataset was updated as a result of '
                          'receiving model input package for dataset:%s' % updated_package['name'])
    except Exception as e:
        log.error(source + 'Failed to update UEB model configuration dataset after '
                           'receiving model input package for dataset ID:%s \n'
                           'Exception: %s' % (model_config_dataset_id, e))
        raise e
def _process_ueb_pkg_request_submit():
    """Finalize a confirmed wizard submission.

    Saves the request as a model-configuration dataset, zips the request
    json together with the selected input files, sends the zip to the app
    server, records the returned job id and 'in queue' status on the
    dataset, clears the wizard session, and renders the submission page.
    Returns the string "Error" when the dataset could not be saved.
    """
    pkgname = base.session['stage_1']['pkgname']
    selected_user_org = base.session['stage_1']['owner_org']
    ueb_pkg_request = _get_package_request_in_json_format()
    selected_file_ids = ueb_pkg_request['selected_file_ids']
    ueb_pkg_request_in_json = ueb_pkg_request['ueb_req_json']
    model_configuration_pkg_id = _save_ueb_request_as_dataset(pkgname, selected_file_ids, selected_user_org)
    if model_configuration_pkg_id:
        request_zip_file = _create_ueb_pkg_build_request_zip_file(ueb_pkg_request_in_json, selected_file_ids)
        pkg_process_id = _send_request_to_app_server(request_zip_file)
        # mark the dataset as queued with the app server's job id
        job_status_queue = uebhelper.StringSettings.app_server_job_status_in_queue
        data_dict = {'package_build_request_job_id': pkg_process_id, 'processing_status': job_status_queue}
        uebhelper.update_package(model_configuration_pkg_id, data_dict)
        # the wizard is done — drop all per-stage form data from the session
        base.session.clear()
        tk.c.request_process_job_id = pkg_process_id
        return tk.render('package_build_request_submission.html')
    else:
        # TODO: show error page
        return "Error"
def _send_request_to_app_server(request_zip_file):
    """POST the zipped package-build request to the app server.

    @param request_zip_file: path of the zip file produced by
        _create_ueb_pkg_build_request_zip_file
    @return: the job/package id assigned by the app server
    Aborts the web request with HTTP 400 when the server responds with a
    non-OK status or does not return a PackageID.
    """
    source = 'uebpackage.uebpackage.packagerequest._send_request_to_app_server():'
    service_host_address = uebhelper.StringSettings.app_server_host_address
    service_request_url = uebhelper.StringSettings.app_server_api_generate_ueb_package_url
    connection = httplib.HTTPConnection(service_host_address)
    headers = {'Content-Type': 'application/text', 'Accept': 'application/text'}
    # get request data from the zip file
    # NOTE(review): zip read in text mode 'r'; 'rb' looks safer for binary
    # content — confirm before changing
    with open(request_zip_file, 'r') as file_obj:
        file_data = file_obj.read()
    request_body_content = file_data
    # call the service TODO: see if we can pass the open file object as request_body_content
    connection.request('POST', service_request_url, request_body_content, headers)
    # retrieve response
    service_call_results = connection.getresponse()
    package_id = None
    if service_call_results.status == httplib.OK:
        log.info(source + 'UEB model build package request was sent to app server')
        service_response_data = service_call_results.read()
        connection.close()
        # convert the json data from the app server to a python dict object
        service_response_dict = json.loads(service_response_data)
        package_id = service_response_dict.get('PackageID', None)
        response_msg = service_response_dict.get('Message', '')
        if not package_id:
            log.error(source + 'App server failed to process model package build request')
            tk.abort(400, _('App server failed to process model package build request: %s') % response_msg)
    else:
        connection.close()
        tk.abort(400, _('App server failed to process model package build request: %s') % service_call_results.reason)
    # cleanup the temp data directory created previously
    #ckan_default_dir = _get_predefined_name('ckan_user_session_temp_dir') #'/tmp/ckan'
    ckan_default_dir = uebhelper.StringSettings.ckan_user_session_temp_dir
    try:
        shutil.rmtree(ckan_default_dir)
    except:
        # best effort cleanup — a leftover temp dir is harmless
        pass
    return package_id
def _update_request_resource_process_job_id(ueb_model_pkg_request_resource_id, pkg_process_id):
    """Record the app server's package-build job id on a request resource.

    The resource table's 'extras' column holds a json string of key/value
    pairs. When a key passed to resource_update is not an actual resource
    field, CKAN folds it into that json — so supplying 'PackageProcessJobID'
    here adds/updates that key inside 'extras'.

    :param ueb_model_pkg_request_resource_id: id of the resource to update
    :param pkg_process_id: package id returned from the app server
        responsible for generating the model package
    :returns: the updated resource dict
    """
    extras_patch = {'PackageProcessJobID': pkg_process_id}
    return _update_resource(ueb_model_pkg_request_resource_id, extras_patch)
def _update_request_resource_process_status(ueb_model_pkg_request_resource_id, status):
    """Store the given processing *status* under the 'PackageProcessingStatus'
    key (kept in the resource's 'extras' json) and return the updated resource."""
    return _update_resource(ueb_model_pkg_request_resource_id,
                            {'PackageProcessingStatus': status})
def _update_resource(resource_id, data_dict):
    """Update the resource identified by *resource_id* with every key/value
    pair in *data_dict* and return the updated resource.

    Keys that are not fields of the resource table are merged into the
    resource's 'extras' json by CKAN's resource_update action.
    """
    current_resource = _get_resource(resource_id)
    current_resource.update(data_dict)
    update_action = tk.get_action('resource_update')
    action_context = {'model': base.model, 'session': base.model.Session,
                      'user': tk.c.user or tk.c.author}
    return update_action(action_context, current_resource)
def _create_ueb_pkg_build_request_zip_file(ueb_pkg_request_in_json, selected_file_ids):
    """
    Creates a zip file containing all the files the user selected in configuring
    ueb model as well as the text file in the form of a json string that contains
    all the parameters and their values selected.
    param ueb_pkg_request_in_json: json string that contains user package build request details
    param: selected_file_ids a dict in which each value is a file id
    rtype: a string representing the location and name of the zip file
    """
    #ckan_default_dir = _get_predefined_name('ckan_user_session_temp_dir') #'/tmp/ckan'
    ckan_default_dir = uebhelper.StringSettings.ckan_user_session_temp_dir
    # save the ueb request json string to the default directory specified by ckan_default_dir
    destination_session_dir = os.path.join(ckan_default_dir, base.session.id)
    destination_files_dir = os.path.join(destination_session_dir, 'files')
    if not os.path.isdir(destination_files_dir):
        os.makedirs(destination_files_dir)
    #ueb_request_json_file_name = _get_predefined_name('ueb_request_json_file_name')
    ueb_request_json_file_name = uebhelper.StringSettings.ueb_request_json_file_name
    request_file_json = os.path.join(destination_files_dir, ueb_request_json_file_name)
    with open(request_file_json, 'w') as file_obj:
        file_obj.write(ueb_pkg_request_in_json)
    resource_show_action = tk.get_action('resource_show')
    context = {'model': base.model, 'session': base.model.Session,
               'user': tk.c.user or tk.c.author}
    # get the storage path
    ckan_storage_path = os.path.join(uploader.get_storage_path(), 'resources')
    # for each file id, get the file object and write to the temp destination dir
    for file_id in selected_file_ids.values():
        # Note:4/22/2014: The way we are finding the filepath for a resource
        # no more works in the new ckan 2.2 as the storage strategy has changed
        # Refer to the uploader.py module and see how it gets the file path
        # from the id of the resource. Use that technique to read each of the
        # files to be part of the zip file
        # new code
        resource_file_path = os.path.join(ckan_storage_path, file_id[0:3], file_id[3:6], file_id[6:])
        # NOTE(review): source opened in text mode 'r' while the copy target
        # is binary ('wb+'); 'rb' looks safer — confirm before changing
        resource_file_obj = open(resource_file_path, 'r')
        resource_file_obj.seek(0)
        data_dict = {'id': file_id}
        matching_file_resource = resource_show_action(context, data_dict)
        file_name = matching_file_resource.get('name')
        # save file to the temp dir
        dest_file_path = os.path.join(destination_files_dir, file_name)
        dest_file_obj = open(dest_file_path, 'wb+')
        # copy in 1 MB chunks
        while True:
            data = resource_file_obj.read(2 ** 20)
            if not data:
                break
            dest_file_obj.write(data)
        resource_file_obj.close()
        dest_file_obj.close()
    # zip all the files in the destination_files_dir to a new zip folder under the destination_session_dir
    # make a folder to store the zip file
    destination_zip_dir = os.path.join(destination_session_dir, 'zip')
    os.mkdir(destination_zip_dir)
    destination_zip_file_path = os.path.join(destination_zip_dir, 'ueb_pkg_request')  # leave out the extension here
    source_files_dir = destination_files_dir
    shutil.make_archive(destination_zip_file_path, format='zip', root_dir=source_files_dir)
    zipped_request_file = destination_zip_file_path + '.zip'
    return zipped_request_file
def _set_context_to_shape_file_resources():
    """
    create a list of shape files in the system that are owned
    by the current user. If the user has uploaded it then he/she owns it.
    The list is stored on tk.c.ueb_domain_shape_files for the template.
    @return: None
    """
    geographic_fs_datasets = uebhelper.get_packages_by_dataset_type('geographic-feature-set')
    # for each resource we need only the id (id be used as the selection value) and the name for display
    file_resources = []
    # placeholder entry shown as the default option in the drop-down
    resource = {'id': 0, 'name': 'Select a shape file ..'}
    file_resources.append(resource)
    for gfs_dataset in geographic_fs_datasets:
        gfs_resources = gfs_dataset['resources']
        for resource in gfs_resources:
            # only active, uploaded zip resources qualify as shape files
            if resource['format'].lower() == 'zip' and \
                    (resource['resource_type'] == 'file.upload' or resource['url_type'] == 'upload') and \
                    resource['state'] == 'active':
                # check if the file resource is owned by the current user
                user_owns_resource = uebhelper.is_user_owns_resource(resource['id'], tk.c.user)
                if user_owns_resource:
                    selected_resource = {'id': resource['id'], 'name': resource['name']}
                    file_resources.append(selected_resource)
    tk.c.ueb_domain_shape_files = file_resources
def _set_context_to_file_resources(file_extension):
    """
    This will create a list of all files that has
    extension of file_extension and return the matching list of
    resources
    @param file_extension: resource format to search for (e.g. 'dat', 'nc')
    @return: list of {'id': ..., 'name': ...} dicts, starting with a
    placeholder entry for the drop-down
    """
    # TODO: save the list of file resources for a given extension type
    # in the session object so that we do not run this function for
    # each page of the form
    # note: resource_search returns a list of matching resources
    # that can include any deleted resources
    resource_search_action = tk.get_action('resource_search')
    context = {'model': base.model, 'session': base.model.Session,
               'user': tk.c.user or tk.c.author, 'for_view': True}
    # get the resource that has the format field set to file_extension
    # and resource_type to file.upload
    data_dict = {'query': ['format:' + file_extension]}
    shape_file_resources = resource_search_action(context, data_dict)['results']
    # for each resource we need only the id (id be used as the selection value) and the name for display
    file_resources = []
    resource = {'id': 0, 'name': 'Select a file ..'}
    file_resources.append(resource)
    for file_resource in shape_file_resources:
        resource = {}
        # filter out any deleted resources
        active_resource = _get_resource(file_resource['id'])
        if not active_resource:
            continue
        # filter out any resources that has resource_type as file.link
        if file_resource['resource_type'] == 'file.link':
            continue
        # check if the file resource is owned by the current user
        user_owns_resource = uebhelper.is_user_owns_resource(file_resource['id'], tk.c.user)
        if user_owns_resource:
            resource['id'] = file_resource['id']
            resource['name'] = file_resource['name']
            file_resources.append(resource)
    return file_resources
def _get_file_name_from_file_id(file_id):
    """Return the display name of the resource whose id is *file_id*."""
    return _get_resource(file_id)['name']
def _get_file_id_from_file_name(package_id, filename):
    """Return the id of the first resource named *filename* within the
    dataset identified by *package_id*, or '' when no resource matches."""
    show_package = tk.get_action('package_show')
    action_context = {'model': base.model, 'session': base.model.Session,
                      'user': tk.c.user or tk.c.author}
    # fetch the dataset together with its resources
    dataset = show_package(action_context, {'id': package_id})
    matching_ids = (res['id'] for res in dataset['resources'] if res['name'] == filename)
    return next(matching_ids, '')
def _get_package(pkg_id_or_name):
    """Return the dataset (including its resources) matching the given
    package id or name."""
    show_package = tk.get_action('package_show')
    action_context = {'model': base.model, 'session': base.model.Session,
                      'user': tk.c.user or tk.c.author}
    return show_package(action_context, {'id': pkg_id_or_name})
def _get_resource(resource_id):
    """Return the resource dict for *resource_id*, or None when the resource
    does not exist or has been deleted (resource_show raises ObjectNotFound
    in both cases)."""
    show_resource = tk.get_action('resource_show')
    action_context = {'model': base.model, 'session': base.model.Session,
                      'user': tk.c.user or tk.c.author}
    try:
        return show_resource(action_context, {'id': resource_id})
    except tk.ObjectNotFound:
        return None
def _get_ueb_pkg_request_resources_pending_processing():
    """Return the package-request resources whose processing status is
    currently 'Processing'."""
    search_resources = tk.get_action('resource_search')
    action_context = {'model': base.model, 'session': base.model.Session,
                      'user': tk.c.user or tk.c.author}
    query = {'query': ['PackageProcessingStatus:Processing']}
    return search_resources(action_context, query)['results']
def _validate_form():
    """Dispatch validation of the submitted wizard page to the matching
    per-stage validator, chosen by the 'form_stage' request parameter.
    Returns the validator's form_vars dict, or None for an unknown stage."""
    stage_validators = {
        'stage_1': _validate_stage_one,
        'stage_2': _validate_stage_two,
        'stage_3': _validate_stage_three,
        'stage_4': _validate_stage_four,
        'stage_5': _validate_stage_five,
        'stage_6': _validate_stage_six,
        'stage_7': _validate_stage_seven,
        'stage_8': _validate_stage_eight,
        'stage_9': _validate_stage_nine,
    }
    validator = stage_validators.get(tk.request.params['form_stage'])
    return validator() if validator is not None else None
def _validate_stage_one():
    """Validate stage 1 (package name, domain file, buffer/grid sizes and
    simulation dates), stash the submitted values in the session, and
    return the form_vars dict (data, errors, error_summary, stages)."""
    session = base.session
    errors = {}
    data = {}
    error_summary = {}
    stages = []
    form_vars = {'data': data, 'errors': errors, 'error_summary':error_summary, 'stages': stages}
    form_stage = 'stage_1'
    # pull every stage-1 field from the request
    pkgname = tk.request.params['pkgname']
    pkgdescription = tk.request.params['pkgdescription']
    domainfiletypeoption = tk.request.params['domainfiletypeoption']
    domainshapefile = tk.request.params['domainshapefile']
    domainnetcdfile = tk.request.params['domainnetcdfile']
    domainnetcdffileformat = tk.request.params['domainnetcdffileformat']
    buffersize = tk.request.params['buffersize']
    gridcellsize = tk.request.params['gridcellsize']
    startdate = tk.request.params['startdate']
    enddate = tk.request.params['enddate']
    timestep = tk.request.params['timestep']
    owner_org = tk.request.params['owner_org']
    data['pkgname'] = pkgname
    data['pkgdescription'] = pkgdescription
    data['domainfiletypeoption'] = domainfiletypeoption
    data['domainshapefile'] = domainshapefile
    data['domainnetcdfile'] = domainnetcdfile
    data['domainnetcdffileformat'] = domainnetcdffileformat
    data['buffersize'] = buffersize
    data['gridcellsize'] = gridcellsize
    data['startdate'] = startdate
    data['enddate'] = enddate
    data['timestep'] = timestep
    data['owner_org'] = owner_org
    # start with an empty error list per field
    for key in data:
        errors[key] = []
    context = {}
    # remember the submitted values so the stage can be revisited
    if form_stage not in session:
        session[form_stage] = {}
    session[form_stage] = data
    session.save()
    not_empty_check = tk.get_validator('not_empty')  # not_empty(key, data, errors, context):
    # Put all the validation functions in a list
    # so that we can execute them all even if any one of them throws validation error
    actions = [
        lambda: not_empty_check('pkgname', data, errors, context),
        lambda: not_empty_check('buffersize', data, errors, context),
        lambda: not_empty_check('gridcellsize', data, errors, context),
        lambda: not_empty_check('startdate', data, errors, context),
        lambda: not_empty_check('enddate', data, errors, context),
    ]
    if data['owner_org'] == '':
        data['owner_org'] = None
    try:
        not_empty_check('owner_org', data, errors, context)
    except:
        # not_empty records the error in 'errors'; the exception itself is noise
        pass
    if domainfiletypeoption == 'polygon':
        # '0' is the placeholder option — treat as no selection
        if data['domainshapefile'] == '0':
            data['domainshapefile'] = None
        actions.append(lambda: not_empty_check('domainshapefile', data, errors, context))
    else:
        errors['domainnetcdfile'].append('Use of netCDF file for domain not yet implemented.')
        # TODO: the following 3 lines need to be uncommented when domain netcdf file use is implemented
        # at the app server
        '''
        actions.append(lambda: not_empty_check('domainnetcdfile', data, errors, context))
        actions.append(lambda: not_empty_check('domainnetcdffileformat', data, errors, context))
        # check if the file selected as domain netcdf file has already been selected for any other input
        _validate_file_selection(data, errors, 'domainnetcdffile', 'domain netCDF')
        '''
    # run every queued check; each failure lands in 'errors' via the validator
    for action in actions:
        try:
            action()
        except:
            pass
    # Check for numeric data type
    try:
        int(buffersize)
    except ValueError:
        errors['buffersize'].append('Buffer size should be an integer value')
    try:
        int(gridcellsize)
    except ValueError:
        errors['gridcellsize'].append('Grid cell size should be an integer value')
    # dates are only parsed when the not_empty check passed
    if len(errors['startdate']) == 0:
        try:
            date = datetime.strptime(startdate, '%m/%d/%Y')
            if date.year != 2011:
                errors['startdate'].append('Year should be 2011')
        except (TypeError, ValueError):
            errors['startdate'].append('Enter a date value')
    if len(errors['enddate']) == 0:
        try:
            date = datetime.strptime(enddate, '%m/%d/%Y')
            if date.year != 2011:
                errors['enddate'].append('Year should be 2011')
        except (TypeError, ValueError):
            errors['enddate'].append('Enter a date value')
    # Populate the error_summary list
    for key in errors:
        # Get the error message for the form field (key)
        value = errors.get(key)
        if value:  # error message exists
            error_summary[key] = value
    tk.c.form_stage = form_stage
    stages = ['active', 'inactive', 'inactive', 'inactive', 'inactive', 'inactive', 'inactive', 'inactive', 'inactive']
    form_vars['data'] = data
    form_vars['stages'] = stages
    form_vars['errors'] = errors
    form_vars['error_summary'] = error_summary
    return form_vars
def _validate_stage_two():
    """Validate stage 2 of the form (model parameters file selection).

    Reads the parameters-file inputs from the request, stores them in the
    session (so the user can navigate between stages), runs the not-empty
    and duplicate-file-selection checks, and returns the template variables
    dict with keys 'data', 'errors', 'error_summary' and 'stages'.
    """
    session = base.session
    errors = {}
    data = {}
    error_summary = {}
    form_vars = {'data': data, 'errors': errors, 'error_summary': error_summary, 'stages': []}
    form_stage = 'stage_2'
    data['parametersfileoption'] = tk.request.params['parametersfileoption']
    data['parametersfile'] = tk.request.params['parametersfile']
    errors['parametersfile'] = []
    context = {}
    # remember the submitted values before validating them
    session[form_stage] = data
    session.save()
    not_empty_check = tk.get_validator('not_empty')  # not_empty(key, data, errors, context)
    if data['parametersfileoption'] == 'No':
        if data['parametersfile'] == '0':  # '0' is the "Select a file ..." placeholder
            data['parametersfile'] = None
        try:
            not_empty_check('parametersfile', data, errors, context)
        except Exception:
            # the validator raises to signal failure; the message is already in errors
            pass
        # the file selected as parameters file must not be used for any other input
        _validate_file_selection(data, errors, 'parametersfile', 'parameters')
    else:
        data['parametersfile'] = None
    # keep only the fields that actually have error messages
    for key, value in errors.items():
        if value:
            error_summary[key] = value
    tk.c.form_stage = form_stage
    stages = ['inactive'] * 9
    stages[1] = 'active'  # highlight this (second) stage in the wizard
    form_vars['stages'] = stages
    return form_vars
def _validate_stage_three():
    """Validate stage 3 of the form (snow initial conditions).

    Each of the four initial-condition inputs (usic, wsis, tic, wcic) is
    supplied either as a constant value or as a grid file.  Queue the
    appropriate not-empty and duplicate-file-selection checks per input,
    save the submitted values in the session, run the checks, and return
    the template variables dict ('data', 'errors', 'error_summary',
    'stages').
    """
    session = base.session
    errors = {}
    data = {}
    error_summary = {}
    form_vars = {'data': data, 'errors': errors, 'error_summary': error_summary, 'stages': []}
    form_stage = 'stage_3'
    for key in ('usicoption', 'usic', 'usicgridfile', 'usicgridfileformat',
                'wsisoption', 'wsis', 'wsisgridfile', 'wsisgridfileformat',
                'ticoption', 'tic', 'ticgridfile', 'ticgridfileformat',
                'wcicoption', 'wcic', 'wcicgridfile', 'wcicgridfileformat'):
        data[key] = tk.request.params[key]
        errors[key] = []
    context = {}
    not_empty_check = tk.get_validator('not_empty')
    actions = []

    def queue_field_checks(option, value_key, file_key, label):
        # One initial-condition input: either a constant value or a grid file.
        if option == 'Constant':
            actions.append(lambda: not_empty_check(value_key, data, errors, context))
            data[file_key] = None
        else:
            if data[file_key] == '0':  # '0' is the "Select a file ..." placeholder
                data[file_key] = None
            actions.append(lambda: not_empty_check(file_key, data, errors, context))
            actions.append(lambda: not_empty_check(file_key + 'format', data, errors, context))
            # the same file must not be selected for any other input
            actions.append(lambda: _validate_file_selection(data, errors, file_key, label))

    queue_field_checks(data['usicoption'], 'usic', 'usicgridfile', 'energy content')
    queue_field_checks(data['wsisoption'], 'wsis', 'wsisgridfile', 'water equivalent')
    queue_field_checks(data['ticoption'], 'tic', 'ticgridfile', 'snow surface dimensionless age')
    queue_field_checks(data['wcicoption'], 'wcic', 'wcicgridfile', 'snow water equivalent')
    # remember the (normalized) submitted values before running the checks
    session[form_stage] = data
    session.save()
    for action in actions:
        try:
            action()
        except Exception:
            # validators raise to signal failure; the message is already in errors
            pass
    # keep only the fields that actually have error messages
    for key, value in errors.items():
        if value:
            error_summary[key] = value
    tk.c.form_stage = form_stage
    stages = ['inactive'] * 9
    stages[2] = 'active'  # highlight this (third) stage in the wizard
    form_vars['stages'] = stages
    return form_vars
def _validate_stage_four():
    """Validate stage 4 of the form (site variables).

    Each site variable (df, aep, sbar, subalb, subtype, gsurf, ts_last) is
    supplied either as a constant value or as a grid file.  Queue the
    appropriate not-empty and duplicate-file-selection checks per input,
    save the submitted values in the session, run the checks, and return
    the template variables dict ('data', 'errors', 'error_summary',
    'stages').
    """
    session = base.session
    errors = {}
    data = {}
    error_summary = {}
    form_vars = {'data': data, 'errors': errors, 'error_summary': error_summary, 'stages': []}
    form_stage = 'stage_4'
    for key in ('dfoption', 'df', 'dfgridfile', 'dfgridfileformat',
                'aepoption', 'aep', 'aepgridfile', 'aepgridfileformat',
                'sbaroption', 'sbar', 'sbargridfile', 'sbargridfileformat',
                'subalboption', 'subalb', 'subalbgridfile', 'subalbgridfileformat',
                'subtypeoption', 'subtype', 'subtypegridfile', 'subtypegridfileformat',
                'gsurfoption', 'gsurf', 'gsurfgridfile', 'gsurfgridfileformat',
                'ts_lastoption', 'ts_last', 'ts_lastgridfile', 'ts_lastgridfileformat'):
        data[key] = tk.request.params[key]
        errors[key] = []
    context = {}
    not_empty_check = tk.get_validator('not_empty')
    actions = []

    def queue_field_checks(option, value_key, file_key, label):
        # One site-variable input: either a constant value or a grid file.
        if option == 'Constant':
            actions.append(lambda: not_empty_check(value_key, data, errors, context))
            data[file_key] = None
        else:
            if data[file_key] == '0':  # '0' is the "Select a file ..." placeholder
                data[file_key] = None
            actions.append(lambda: not_empty_check(file_key, data, errors, context))
            actions.append(lambda: not_empty_check(file_key + 'format', data, errors, context))
            # the same file must not be selected for any other input
            actions.append(lambda: _validate_file_selection(data, errors, file_key, label))

    queue_field_checks(data['dfoption'], 'df', 'dfgridfile', 'drift factor')
    queue_field_checks(data['aepoption'], 'aep', 'aepgridfile', 'albedo extinction coefficient')
    queue_field_checks(data['sbaroption'], 'sbar', 'sbargridfile',
                       'maximum snow load held per branch area')
    queue_field_checks(data['subalboption'], 'subalb', 'subalbgridfile',
                       'albedo of the substrate beneath the snow')
    queue_field_checks(data['subtypeoption'], 'subtype', 'subtypegridfile',
                       'type of beneath snow substrate encoded')
    queue_field_checks(data['gsurfoption'], 'gsurf', 'gsurfgridfile',
                       'fraction of surface snow melt')
    queue_field_checks(data['ts_lastoption'], 'ts_last', 'ts_lastgridfile',
                       'snow surface temperature')
    # remember the (normalized) submitted values before running the checks
    session[form_stage] = data
    session.save()
    for action in actions:
        try:
            action()
        except Exception:
            # validators raise to signal failure; the message is already in errors
            pass
    # keep only the fields that actually have error messages
    for key, value in errors.items():
        if value:
            error_summary[key] = value
    tk.c.form_stage = form_stage
    stages = ['inactive'] * 9
    stages[3] = 'active'  # highlight this (fourth) stage in the wizard
    form_vars['stages'] = stages
    return form_vars
def _validate_stage_five():
    """Validate stage 5 of the form (vegetation variables).

    Each vegetation input (cc, hcan, lai, ycage) can be derived from NLCD
    data, supplied as a constant, or supplied as a grid file.  Queue the
    appropriate not-empty and duplicate-file-selection checks per input,
    save the submitted values in the session, run the checks, and return
    the template variables dict ('data', 'errors', 'error_summary',
    'stages').
    """
    session = base.session
    errors = {}
    data = {}
    error_summary = {}
    form_vars = {'data': data, 'errors': errors, 'error_summary': error_summary, 'stages': []}
    form_stage = 'stage_5'
    for key in ('ccoption', 'cc', 'ccgridfile', 'ccgridfileformat',
                'hcanoption', 'hcan', 'hcangridfile', 'hcangridfileformat',
                'laioption', 'lai', 'laigridfile', 'laigridfileformat',
                'ycageoption', 'ycage', 'ycagegridfile', 'ycagegridfileformat'):
        data[key] = tk.request.params[key]
        errors[key] = []
    context = {}
    not_empty_check = tk.get_validator('not_empty')
    actions = []

    def queue_field_checks(option, value_key, file_key, label):
        # One vegetation input: derived from NLCD, a constant, or a grid file.
        if option == 'NLCD':
            data[file_key] = None
        elif option == 'Constant':
            actions.append(lambda: not_empty_check(value_key, data, errors, context))
            data[file_key] = None
        elif option == 'Grid':
            if data[file_key] == '0':  # '0' is the "Select a file ..." placeholder
                data[file_key] = None
            actions.append(lambda: not_empty_check(file_key, data, errors, context))
            actions.append(lambda: not_empty_check(file_key + 'format', data, errors, context))
            # the same file must not be selected for any other input
            actions.append(lambda: _validate_file_selection(data, errors, file_key, label))

    queue_field_checks(data['ccoption'], 'cc', 'ccgridfile', 'canopy coverage')
    queue_field_checks(data['hcanoption'], 'hcan', 'hcangridfile', 'canopy height')
    queue_field_checks(data['laioption'], 'lai', 'laigridfile', 'leaf area index')
    queue_field_checks(data['ycageoption'], 'ycage', 'ycagegridfile', 'forest age flag')
    # remember the (normalized) submitted values before running the checks
    session[form_stage] = data
    session.save()
    for action in actions:
        try:
            action()
        except Exception:
            # validators raise to signal failure; the message is already in errors
            pass
    # keep only the fields that actually have error messages
    for key, value in errors.items():
        if value:
            error_summary[key] = value
    tk.c.form_stage = form_stage
    stages = ['inactive'] * 9
    stages[4] = 'active'  # highlight this (fifth) stage in the wizard
    form_vars['stages'] = stages
    return form_vars
def _validate_stage_six():
    """Validate stage 6 of the form (terrain variables).

    Each terrain input (apr, slope, aspect, latitude, longitude) can be
    computed by the app, supplied as a constant, or supplied as a grid
    file.  Queue the appropriate not-empty and duplicate-file-selection
    checks per input, save the submitted values in the session, run the
    checks, and return the template variables dict ('data', 'errors',
    'error_summary', 'stages').
    """
    session = base.session
    errors = {}
    data = {}
    error_summary = {}
    form_vars = {'data': data, 'errors': errors, 'error_summary': error_summary, 'stages': []}
    form_stage = 'stage_6'
    for key in ('aproption', 'apr', 'aprgridfile', 'aprgridfileformat',
                'slopeoption', 'slope', 'slopegridfile', 'slopegridfileformat',
                'aspectoption', 'aspect', 'aspectgridfile', 'aspectgridfileformat',
                'latitudeoption', 'latitude', 'latitudegridfile', 'latitudegridfileformat',
                'longitudeoption', 'longitude', 'longitudegridfile', 'longitudegridfileformat'):
        data[key] = tk.request.params[key]
        errors[key] = []
    context = {}
    not_empty_check = tk.get_validator('not_empty')
    actions = []

    def queue_field_checks(option, value_key, file_key, label):
        # One terrain input: computed by the server, a constant, or a grid file.
        if option == 'Compute':
            data[file_key] = None
        elif option == 'Constant':
            actions.append(lambda: not_empty_check(value_key, data, errors, context))
            data[file_key] = None
        elif option == 'Grid':
            if data[file_key] == '0':  # '0' is the "Select a file ..." placeholder
                data[file_key] = None
            actions.append(lambda: not_empty_check(file_key, data, errors, context))
            actions.append(lambda: not_empty_check(file_key + 'format', data, errors, context))
            # the same file must not be selected for any other input
            actions.append(lambda: _validate_file_selection(data, errors, file_key, label))

    queue_field_checks(data['aproption'], 'apr', 'aprgridfile', 'average atmospheric')
    queue_field_checks(data['slopeoption'], 'slope', 'slopegridfile', 'slope')
    queue_field_checks(data['aspectoption'], 'aspect', 'aspectgridfile', 'aspect')
    queue_field_checks(data['latitudeoption'], 'latitude', 'latitudegridfile', 'latitude')
    queue_field_checks(data['longitudeoption'], 'longitude', 'longitudegridfile', 'longitude')
    # remember the (normalized) submitted values before running the checks
    session[form_stage] = data
    session.save()
    for action in actions:
        try:
            action()
        except Exception:
            # validators raise to signal failure; the message is already in errors
            pass
    # keep only the fields that actually have error messages
    for key, value in errors.items():
        if value:
            error_summary[key] = value
    tk.c.form_stage = form_stage
    stages = ['inactive'] * 9
    stages[5] = 'active'  # highlight this (sixth) stage in the wizard
    form_vars['stages'] = stages
    return form_vars
def _validate_stage_seven():
    """Validate stage 7 of the form (monthly bristow-campbell temperatures).

    Reads one temperature value per month from the request, stores them in
    the session, checks that each is present and numeric, and returns the
    template variables dict ('data', 'errors', 'error_summary', 'stages').
    """
    session = base.session
    errors = {}
    data = {}
    error_summary = {}
    form_vars = {'data': data, 'errors': errors, 'error_summary': error_summary, 'stages': []}
    form_stage = 'stage_7'
    months = ('january', 'february', 'march', 'april', 'may', 'june', 'july',
              'august', 'september', 'october', 'november', 'december')
    for month in months:
        data[month] = tk.request.params[month]
        errors[month] = []
    context = {}
    # remember the submitted values before validating them
    session[form_stage] = data
    session.save()
    not_empty_check = tk.get_validator('not_empty')
    for month in months:
        try:
            not_empty_check(month, data, errors, context)
        except Exception:
            # the validator raises to signal failure; the message is already in errors
            pass
        if not errors[month]:
            # a present value must also be numeric
            try:
                float(data[month])
            except ValueError:
                errors[month].append('Temperature should be a numeric value')
    # keep only the fields that actually have error messages
    for key, value in errors.items():
        if value:
            error_summary[key] = value
    tk.c.form_stage = form_stage
    stages = ['inactive'] * 9
    stages[6] = 'active'  # highlight this (seventh) stage in the wizard
    form_vars['stages'] = stages
    return form_vars
def _validate_stage_eight():
    """Validate stage 8 of the form (climate variables).

    Each climate input (ta, prec, v, rh, snowalb, qg) can be computed by
    the app (except qg), supplied as a constant, supplied as a time-series
    text file, or supplied as a grid file.  Queue the appropriate
    not-empty and duplicate-file-selection checks per input, save the
    submitted values in the session, run the checks, verify that constant
    values are numeric, and return the template variables dict ('data',
    'errors', 'error_summary', 'stages').
    """
    session = base.session
    errors = {}
    data = {}
    error_summary = {}
    form_vars = {'data': data, 'errors': errors, 'error_summary': error_summary, 'stages': []}
    form_stage = 'stage_8'
    for key in ('taoption', 'ta', 'tatextfile', 'tagridfile', 'tagridfileformat',
                'precoption', 'prec', 'prectextfile', 'precgridfile', 'precgridfileformat',
                'voption', 'v', 'vtextfile', 'vgridfile', 'vgridfileformat',
                'rhoption', 'rh', 'rhtextfile', 'rhgridfile', 'rhgridfileformat',
                'snowalboption', 'snowalb', 'snowalbtextfile', 'snowalbgridfile', 'snowalbgridfileformat',
                'qgoption', 'qg', 'qgtextfile', 'qggridfile', 'qggridfileformat'):
        data[key] = tk.request.params[key]
        errors[key] = []
    context = {}
    not_empty_check = tk.get_validator('not_empty')
    actions = []

    def queue_field_checks(option, value_key, text_key, grid_key, label, computable=True):
        # One climate input: computed (when offered), a constant, a time-series
        # text file, or a grid file.  The unused file keys are cleared so only
        # the chosen source is kept.
        if computable and option == 'Compute':
            data[text_key] = None
            data[grid_key] = None
        elif option == 'Constant':
            actions.append(lambda: not_empty_check(value_key, data, errors, context))
            data[text_key] = None
            data[grid_key] = None
        elif option == 'Text':
            data[grid_key] = None
            if data[text_key] == '0':  # '0' is the "Select a file ..." placeholder
                data[text_key] = None
            actions.append(lambda: not_empty_check(text_key, data, errors, context))
            # the same file must not be selected for any other input
            actions.append(lambda: _validate_file_selection(data, errors, text_key, label))
        elif option == 'Grid':
            data[text_key] = None
            if data[grid_key] == '0':
                data[grid_key] = None
            actions.append(lambda: not_empty_check(grid_key, data, errors, context))
            actions.append(lambda: not_empty_check(grid_key + 'format', data, errors, context))
            actions.append(lambda: _validate_file_selection(data, errors, grid_key, label))

    queue_field_checks(data['taoption'], 'ta', 'tatextfile', 'tagridfile', 'temperature')
    queue_field_checks(data['precoption'], 'prec', 'prectextfile', 'precgridfile', 'precipitation')
    queue_field_checks(data['voption'], 'v', 'vtextfile', 'vgridfile', 'wind')
    queue_field_checks(data['rhoption'], 'rh', 'rhtextfile', 'rhgridfile', 'relative humidity')
    queue_field_checks(data['snowalboption'], 'snowalb', 'snowalbtextfile', 'snowalbgridfile',
                       'snow albedo')
    # ground heat flux has no "Compute" option in the form
    queue_field_checks(data['qgoption'], 'qg', 'qgtextfile', 'qggridfile', 'ground heat flux',
                       computable=False)
    # remember the (normalized) submitted values before running the checks
    session[form_stage] = data
    session.save()
    for action in actions:
        try:
            action()
        except Exception:
            # validators raise to signal failure; the message is already in errors
            pass
    # constant values, when present, must be numeric
    for option_key, value_key, display in (
            ('taoption', 'ta', 'Temperature'),
            ('precoption', 'prec', 'Precipitation'),
            ('voption', 'v', 'Wind speed'),
            ('rhoption', 'rh', 'Relative humidity'),
            ('snowalboption', 'snowalb', 'Snow albedo'),
            ('qgoption', 'qg', 'Ground heat flux')):
        if data[option_key] == 'Constant' and not errors[value_key]:
            try:
                float(data[value_key])
            except ValueError:
                errors[value_key].append('%s should be a numeric value' % display)
    # keep only the fields that actually have error messages
    for key, value in errors.items():
        if value:
            error_summary[key] = value
    tk.c.form_stage = form_stage
    stages = ['inactive'] * 9
    stages[7] = 'active'  # highlight this (eighth) stage in the wizard
    form_vars['stages'] = stages
    return form_vars
def _validate_stage_nine():
    """Validate the stage-9 (output control files) form submission.

    Reads the stage-9 fields from the current request, stores the data in the
    session, runs the not-empty and duplicate-file-selection checks for any
    custom (non-default) control files, and returns the ``form_vars`` dict
    (``data``/``errors``/``error_summary``/``stages``) for re-rendering.
    """
    session = base.session
    errors = {}
    data = {}
    error_summary = {}
    form_vars = {'data': data, 'errors': errors, 'error_summary': error_summary, 'stages': []}
    form_stage = 'stage_9'
    # Raw form field values for this stage.
    data['outputControlFileOption'] = tk.request.params['outputControlFileOption']
    data['outputControlFile'] = tk.request.params['outputControlFile']
    data['aggrOutputControlFileOption'] = tk.request.params['aggrOutputControlFileOption']
    data['aggrOutputControlFile'] = tk.request.params['aggrOutputControlFile']
    for key in data:
        errors[key] = []
    context = {}
    # Store the submitted data in the session. ``data`` is held by reference,
    # so the '0' -> None normalization below is reflected in the session too.
    # (The previous redundant "if form_stage not in session" pre-assignment,
    # which was immediately overwritten, has been removed.)
    session[form_stage] = data
    session.save()
    not_empty_check = tk.get_validator('not_empty')  # not_empty(key, data, errors, context)
    if data['outputControlFileOption'] == 'No':
        # 'No' means the user supplies a custom output control file.
        if data['outputControlFile'] == '0':  # '0' means "no file selected"
            data['outputControlFile'] = None
        try:
            not_empty_check('outputControlFile', data, errors, context)
        except Exception:
            # The validator records the message in ``errors`` and then raises
            # (e.g. StopOnError); narrowed from a bare except.
            pass
        # check if the file selected for output control file
        # has already been selected for any other input
        _validate_file_selection(data, errors, 'outputControlFile', 'output control')
    else:
        data['outputControlFile'] = None
    if data['aggrOutputControlFileOption'] == 'No':
        if data['aggrOutputControlFile'] == '0':
            data['aggrOutputControlFile'] = None
        try:
            not_empty_check('aggrOutputControlFile', data, errors, context)
        except Exception:
            pass
        # check if the file selected for aggregated output control
        # has already been selected for any other input
        _validate_file_selection(data, errors, 'aggrOutputControlFile', 'aggregated output control')
    else:
        data['aggrOutputControlFile'] = None
    for key in errors:
        # Copy only the fields that actually collected an error message.
        value = errors.get(key)
        if value:  # error message exists
            error_summary[key] = value
    tk.c.form_stage = form_stage
    # Stage 9 is the active wizard step.
    stages = ['inactive', 'inactive', 'inactive', 'inactive', 'inactive', 'inactive', 'inactive', 'inactive', 'active']
    form_vars['data'] = data
    form_vars['stages'] = stages
    form_vars['errors'] = errors
    form_vars['error_summary'] = error_summary
    return form_vars
def _validate_file_selection(data, errors, file_form_field_name, file_name):
    """Record an error if the file chosen for *file_form_field_name* is
    already in use by a different input field on any wizard stage.

    Scans the per-stage data stored in the session and stops at the first
    duplicate found.
    """
    wizard_stage_keys = {'stage_1', 'stage_2', 'stage_3', 'stage_4', 'stage_5',
                         'stage_6', 'stage_7', 'stage_8', 'stage_9'}
    session = base.session
    selected_file = data[file_form_field_name]
    for stage_key in session:
        # Only wizard-stage entries are relevant; skip any other session keys.
        if stage_key not in wizard_stage_keys:
            continue
        for field, chosen in session[stage_key].items():
            if not chosen or chosen == '0':  # no file selected for this field
                continue
            if field == file_form_field_name:  # don't compare the field to itself
                continue
            if chosen == selected_file:
                errors[file_form_field_name].append('The file you have selected as %s file is already '
                                                    'selected for another input.' % file_name)
                return
def _get_default_data(form_stage):
    """Return the default form data for *form_stage* ('stage_2'..'stage_9').

    Returns None for 'stage_1' or any unrecognized stage key, matching the
    implicit None of the original dispatch chain.
    """
    defaults_by_stage = {
        'stage_2': _get_default_data_stage_two,
        'stage_3': _get_default_data_stage_three,
        'stage_4': _get_default_data_stage_four,
        'stage_5': _get_default_data_stage_five,
        'stage_6': _get_default_data_stage_six,
        'stage_7': _get_default_data_stage_seven,
        'stage_8': _get_default_data_stage_eight,
        'stage_9': _get_default_data_stage_nine,
    }
    provider = defaults_by_stage.get(form_stage)
    return provider() if provider is not None else None
def _get_default_data_stage_two():
data = {'parametersfileoption': 'Yes'}
return data
def _get_default_data_stage_three():
# TODO: read these hard coded values from a file that can exist in the UEB Input Dataset
data = {}
data['usicoption'] = 'Constant'
data['usic'] = 0
data['wsisoption'] = 'Constant'
data['wsis'] = 0
data['ticoption'] = 'Constant'
data['tic'] = 0
data['wcicoption'] = 'Constant'
data['wcic'] = 0
return data
def _get_default_data_stage_four():
# TODO: read these hard coded values from a file that can exist in the UEB Input Dataset
data = {}
data['dfoption'] = 'Constant'
data['df'] = 1
data['aepoption'] = 'Constant'
data['aep'] = 0.1
data['sbaroption'] = 'Constant'
data['sbar'] = 6.6
data['subalboption'] = 'Constant'
data['subalb'] = 0.25
data['subtypeoption'] = 'Constant'
data['subtype'] = 0
data['gsurfoption'] = 'Constant'
data['gsurf'] = 0
data['ts_lastoption'] = 'Constant'
data['ts_last'] = -9999
return data
def _get_default_data_stage_five():
data = {}
data['ccoption'] = 'NLCD'
data['hcanoption'] = 'NLCD'
data['laioption'] = 'NLCD'
data['ycageoption'] = 'NLCD'
return data
def _get_default_data_stage_six():
data = {}
data['aproption'] = 'Compute'
data['slopeoption'] = 'Compute'
data['aspectoption'] = 'Compute'
data['latitudeoption'] = 'Compute'
data['longitudeoption'] = 'Compute'
return data
def _get_default_data_stage_seven():
# TODO: read these hard coded values from a file that can exist in the UEB Input Dataset
data = {}
data['january'] = 6.743
data['february'] = 7.927
data['march'] = 8.055
data['april'] = 8.602
data['may'] = 8.43
data['june'] = 9.76
data['july'] = 0
data['august'] = 0
data['september'] = 0
data['october'] = 7.4
data['november'] = 9.14
data['december'] = 6.67
return data
def _get_default_data_stage_eight():
data = {}
data['taoption'] = 'Compute'
data['precoption'] = 'Compute'
data['voption'] = 'Compute'
data['rhoption'] = 'Compute'
data['snowalboption'] = 'Compute'
data['qgoption'] = 'Constant'
data['qg'] = 0
return data
def _get_default_data_stage_nine():
data = {}
data['outputControlFileOption'] = 'Yes'
data['aggrOutputControlFileOption'] = 'Yes'
return data
def _get_predefined_name(predefined_name_key):
predefined_names = {}
predefined_names['ckan_user_session_temp_dir'] = '/tmp/ckan'
predefined_names['ueb_request_json_file_name'] = 'ueb_pkg_request.json'
predefined_names['ueb_request_text_resource_file_name'] = 'ueb_pkg_request.txt'
predefined_names['ueb_request_zip_file_name'] = 'ueb_request.zip'
return predefined_names.get(predefined_name_key, None)
def _get_package_request_in_json_format():
    """Assemble the UEB model package request from the wizard data in the session.

    Walks stages 1-9 of the form data stored in the session (falling back to
    the per-stage defaults for stages 2-9 when a stage was never visited),
    and builds:
      * ``selected_file_ids`` - resource ids of every file the user selected
      * ``ueb_req_json``      - the request serialized as a JSON string

    Side effects: populates ``tk.c.ueb_input_sections`` and
    ``tk.c.ueb_input_section_data_items`` for the confirmation page (each
    section dict carries an 'order' list naming its display keys).

    Returns a dict ``{'selected_file_ids': ..., 'ueb_req_json': ...}``.
    """
    # TODO: the session may not have data for all stages of the form if the
    # user did not navigate to all stages. Stages 2-9 fall back to
    # _get_default_data_stage_*() below; stage 1 is assumed to always exist
    # in the session.
    selected_file_ids = {}
    ueb_request_data = {}
    session = base.session
    tk.c.ueb_input_sections = []
    tk.c.ueb_input_section_data_items = {}
    ueb_default_dataset = _get_package('ueb-default-configuration-dataset')
    ueb_default_input_dataset_id = ueb_default_dataset['id']  # '0e39dd8f-85ac-4bee-b18c-ac5cb7d60328'
    # --- stage 1: model domain setup ---
    stage_1_data = session['stage_1']
    section_name = 'Model domain setup'
    tk.c.ueb_input_sections.append(section_name)
    tk.c.ueb_input_section_data_items[section_name] = {}
    tk.c.ueb_input_section_data_items[section_name]['order'] = ['Start date:', 'End date:', 'Time step:',
                                                               'Buffer size:', 'Grid cell size:', 'Domain file name:']
    tk.c.ueb_input_section_data_items[section_name]['Start date:'] = stage_1_data['startdate']
    ueb_request_data['StartDate'] = stage_1_data['startdate']
    tk.c.ueb_input_section_data_items[section_name]['End date:'] = stage_1_data['enddate']
    ueb_request_data['EndDate'] = stage_1_data['enddate']
    tk.c.ueb_input_section_data_items[section_name]['Time step:'] = stage_1_data['timestep']
    ueb_request_data['TimeStep'] = stage_1_data['timestep']
    tk.c.ueb_input_section_data_items[section_name]['Buffer size:'] = stage_1_data['buffersize']
    ueb_request_data['BufferSize'] = stage_1_data['buffersize']
    tk.c.ueb_input_section_data_items[section_name]['Grid cell size:'] = stage_1_data['gridcellsize']
    ueb_request_data['GridCellSize'] = stage_1_data['gridcellsize']
    if stage_1_data['domainfiletypeoption'] == 'polygon':
        ueb_request_data['DomainFileName'] = _get_file_name_from_file_id(stage_1_data['domainshapefile'])
        selected_file_ids['domain_file_id'] = stage_1_data['domainshapefile']
    else:
        # domain given as a netCDF grid file
        ueb_request_data['DomainFileName'] = _get_file_name_from_file_id(stage_1_data['domainnetcdffile'])
        ueb_request_data['DomainGridFileFormat'] = stage_1_data['domainnetcdffileformat']
        selected_file_ids['domain_file_id'] = stage_1_data['domainnetcdffile']
    tk.c.ueb_input_section_data_items[section_name]['Domain file name:'] = ueb_request_data['DomainFileName']
    # --- stage 2: model parameters ---
    stage_2_data = session.get('stage_2', None)
    section_name = 'Model parameters setup'
    tk.c.ueb_input_sections.append(section_name)
    tk.c.ueb_input_section_data_items[section_name] = {}
    tk.c.ueb_input_section_data_items[section_name]['order'] = []
    tk.c.ueb_input_section_data_items[section_name]['order'].append('Use default parameter file:')
    if not stage_2_data:
        # get the default data
        stage_2_data = _get_default_data_stage_two()
    default_parameter_file_name = 'param.dat'
    if stage_2_data['parametersfileoption'] == 'Yes':
        selected_file_ids['parameters_file_id'] = _get_file_id_from_file_name(ueb_default_input_dataset_id,
                                                                              default_parameter_file_name)
        ueb_request_data['ModelParametersFileName'] = default_parameter_file_name
        tk.c.ueb_input_section_data_items[section_name]['Use default parameter file:'] = 'Yes'
    else:
        selected_file_ids['parameters_file_id'] = stage_2_data['parametersfile']
        ueb_request_data['ModelParametersFileName'] = _get_file_name_from_file_id(selected_file_ids['parameters_file_id'])
        tk.c.ueb_input_section_data_items[section_name]['Use default parameter file:'] = 'No'
    tk.c.ueb_input_section_data_items[section_name]['Parameter file name:'] = \
        ueb_request_data['ModelParametersFileName']
    tk.c.ueb_input_section_data_items[section_name]['order'].append('Parameter file name:')
    # --- stage 3: site initial condition - state variables ---
    stage_3_data = session.get('stage_3', None)
    if not stage_3_data:
        # get the default data
        stage_3_data = _get_default_data_stage_three()
    ueb_request_data['SiteInitialConditions'] = {}
    section_name = 'Site initial condition - state variables setup'
    tk.c.ueb_input_sections.append(section_name)
    tk.c.ueb_input_section_data_items[section_name] = {}
    tk.c.ueb_input_section_data_items[section_name]['order'] = []
    stage_3_variables = ['usic', 'wsis', 'tic', 'wcic']
    for var in stage_3_variables:
        ueb_request_data['SiteInitialConditions']['is_' + var + '_constant'] = False
        ueb_request_data['SiteInitialConditions'][var + '_constant_value'] = 0
        ueb_request_data['SiteInitialConditions'][var + '_grid_file_name'] = ''
        ueb_request_data['SiteInitialConditions'][var + '_grid_file_format'] = ''
        if stage_3_data[var + 'option'] == 'Constant':
            ueb_request_data['SiteInitialConditions']['is_' + var + '_constant'] = True
            ueb_request_data['SiteInitialConditions'][var + '_constant_value'] = stage_3_data[var]
            tk.c.ueb_input_section_data_items[section_name]['order'].append(var + '-use constant value:')
            tk.c.ueb_input_section_data_items[section_name][var + '-use constant value:'] = 'Yes '
            tk.c.ueb_input_section_data_items[section_name]['order'].append(var + '-constant value:')
            tk.c.ueb_input_section_data_items[section_name][var + '-constant value:'] = stage_3_data[var]
        else:
            selected_file_ids[var + '_grid_file_id'] = stage_3_data[var + 'gridfile']
            ueb_request_data['SiteInitialConditions'][var + '_grid_file_name'] = _get_file_name_from_file_id(
                stage_3_data[var + 'gridfile'])
            ueb_request_data['SiteInitialConditions'][var + '_grid_file_format'] = stage_3_data[var + 'gridfileformat']
            tk.c.ueb_input_section_data_items[section_name]['order'].append(var + '-use constant value:')
            tk.c.ueb_input_section_data_items[section_name][var + '-use constant value:'] = 'No '
            tk.c.ueb_input_section_data_items[section_name]['order'].append(var + '-grid file name:')
            tk.c.ueb_input_section_data_items[section_name][var + '-grid file name:'] = \
                ueb_request_data['SiteInitialConditions'][var + '_grid_file_name']
            tk.c.ueb_input_section_data_items[section_name]['order'].append(var + '-grid file format:')
            # BUG FIX: the format was previously assigned to the
            # '-grid file name:' key, clobbering the file name and leaving
            # the '-grid file format:' entry (already in 'order') unset.
            tk.c.ueb_input_section_data_items[section_name][var + '-grid file format:'] = \
                stage_3_data[var + 'gridfileformat']
    # --- stage 4: site initial condition - snow variables ---
    stage_4_data = session.get('stage_4', None)
    if not stage_4_data:
        # get the default data
        stage_4_data = _get_default_data_stage_four()
    section_name = 'Site initial condition - snow variables setup'
    tk.c.ueb_input_sections.append(section_name)
    tk.c.ueb_input_section_data_items[section_name] = {}
    tk.c.ueb_input_section_data_items[section_name]['order'] = []
    stage_4_variables = ['df', 'aep', 'sbar', 'subalb', 'subtype', 'gsurf', 'ts_last']
    for var in stage_4_variables:
        ueb_request_data['SiteInitialConditions']['is_' + var + '_constant'] = False
        ueb_request_data['SiteInitialConditions'][var + '_constant_value'] = 0
        ueb_request_data['SiteInitialConditions'][var + '_grid_file_name'] = ''
        ueb_request_data['SiteInitialConditions'][var + '_grid_file_format'] = ''
        if stage_4_data[var + 'option'] == 'Constant':
            ueb_request_data['SiteInitialConditions']['is_' + var + '_constant'] = True
            ueb_request_data['SiteInitialConditions'][var + '_constant_value'] = stage_4_data[var]
            tk.c.ueb_input_section_data_items[section_name]['order'].append(var + '-use constant value:')
            tk.c.ueb_input_section_data_items[section_name][var + '-use constant value:'] = 'Yes '
            tk.c.ueb_input_section_data_items[section_name]['order'].append(var + '-constant value:')
            tk.c.ueb_input_section_data_items[section_name][var + '-constant value:'] = stage_4_data[var]
        else:
            selected_file_ids[var + '_grid_file_id'] = stage_4_data[var + 'gridfile']
            ueb_request_data['SiteInitialConditions'][var + '_grid_file_name'] = \
                _get_file_name_from_file_id(stage_4_data[var + 'gridfile'])
            ueb_request_data['SiteInitialConditions'][var + '_grid_file_format'] = \
                stage_4_data[var + 'gridfileformat']
            tk.c.ueb_input_section_data_items[section_name]['order'].append(var + '-use constant value:')
            tk.c.ueb_input_section_data_items[section_name][var + '-use constant value:'] = 'No '
            tk.c.ueb_input_section_data_items[section_name]['order'].append(var + '-grid file name:')
            tk.c.ueb_input_section_data_items[section_name][var + '-grid file name:'] =\
                ueb_request_data['SiteInitialConditions'][var + '_grid_file_name']
            tk.c.ueb_input_section_data_items[section_name]['order'].append(var + '-grid file format:')
            # BUG FIX: was assigned to the '-grid file name:' key (see stage 3)
            tk.c.ueb_input_section_data_items[section_name][var + '-grid file format:'] =\
                stage_4_data[var + 'gridfileformat']
    # --- stage 5: site initial condition - land cover variables ---
    stage_5_data = session.get('stage_5', None)
    if not stage_5_data:
        # get the default data
        stage_5_data = _get_default_data_stage_five()
    # set data for confirmation page
    section_name = 'Site initial condition - land cover variables setup'
    tk.c.ueb_input_sections.append(section_name)
    tk.c.ueb_input_section_data_items[section_name] = {}
    tk.c.ueb_input_section_data_items[section_name]['order'] = []
    stage_5_variables = ['cc', 'hcan', 'lai', 'ycage']
    for var in stage_5_variables:
        ueb_request_data['SiteInitialConditions'][var + '_constant_value'] = 0
        ueb_request_data['SiteInitialConditions'][var + '_grid_file_name'] = ''
        ueb_request_data['SiteInitialConditions'][var + '_grid_file_format'] = ''
        ueb_request_data['SiteInitialConditions']['is_' + var + '_constant'] = False
        ueb_request_data['SiteInitialConditions']['is_' + var + '_derive_from_NLCD'] = False
        if stage_5_data[var + 'option'] == 'Constant':
            ueb_request_data['SiteInitialConditions']['is_' + var + '_constant'] = True
            ueb_request_data['SiteInitialConditions'][var + '_constant_value'] = stage_5_data[var]
            # set data for confirmation page
            tk.c.ueb_input_section_data_items[section_name]['order'].append(var + '-use constant value:')
            tk.c.ueb_input_section_data_items[section_name][var + '-use constant value:'] = 'Yes '
            tk.c.ueb_input_section_data_items[section_name]['order'].append(var + '-constant value:')
            tk.c.ueb_input_section_data_items[section_name][var + '-constant value:'] = stage_5_data[var]
        elif stage_5_data[var + 'option'] == 'NLCD':
            ueb_request_data['SiteInitialConditions']['is_' + var + '_derive_from_NLCD'] = True
            tk.c.ueb_input_section_data_items[section_name]['order'].append(var + '-derive from NLCD:')
            tk.c.ueb_input_section_data_items[section_name][var + '-derive from NLCD:'] = 'Yes '
        else:
            selected_file_ids[var + '_grid_file_id'] = stage_5_data[var + 'gridfile']
            ueb_request_data['SiteInitialConditions'][var + '_grid_file_name'] = \
                _get_file_name_from_file_id(stage_5_data[var + 'gridfile'])
            ueb_request_data['SiteInitialConditions'][var + '_grid_file_format'] = stage_5_data[var + 'gridfileformat']
            # set data for confirmation page
            tk.c.ueb_input_section_data_items[section_name]['order'].append(var + '-use constant value:')
            tk.c.ueb_input_section_data_items[section_name][var + '-use constant value:'] = 'No '
            tk.c.ueb_input_section_data_items[section_name]['order'].append(var + '-derive from NLCD:')
            tk.c.ueb_input_section_data_items[section_name][var + '-derive from NLCD:'] = 'No '
            tk.c.ueb_input_section_data_items[section_name]['order'].append(var + '-grid file name:')
            tk.c.ueb_input_section_data_items[section_name][var + '-grid file name:'] = \
                ueb_request_data['SiteInitialConditions'][var + '_grid_file_name']
            tk.c.ueb_input_section_data_items[section_name]['order'].append(var + '-grid file format:')
            # BUG FIX: was assigned to the '-grid file name:' key (see stage 3)
            tk.c.ueb_input_section_data_items[section_name][var + '-grid file format:'] = \
                stage_5_data[var + 'gridfileformat']
    # --- stage 6: site initial condition - geographic variables ---
    stage_6_data = session.get('stage_6', None)
    if not stage_6_data:
        # get the default data
        stage_6_data = _get_default_data_stage_six()
    # set data for confirmation page
    section_name = 'Site initial condition - geographic variables setup'
    tk.c.ueb_input_sections.append(section_name)
    tk.c.ueb_input_section_data_items[section_name] = {}
    tk.c.ueb_input_section_data_items[section_name]['order'] = []
    stage_6_variables = ['apr', 'slope', 'aspect', 'latitude', 'longitude']
    for var in stage_6_variables:
        ueb_request_data['SiteInitialConditions'][var + '_constant_value'] = 0
        ueb_request_data['SiteInitialConditions'][var + '_grid_file_name'] = ''
        ueb_request_data['SiteInitialConditions'][var + '_grid_file_format'] = ''
        ueb_request_data['SiteInitialConditions']['is_' + var + '_constant'] = False
        # lat/long are derived from the projection; the others from elevation
        if var == 'latitude' or var == 'longitude':
            ueb_request_data['SiteInitialConditions']['is_' + var + '_derive_from_projection'] = False
        else:
            ueb_request_data['SiteInitialConditions']['is_' + var + '_derive_from_elevation'] = False
        if stage_6_data[var + 'option'] == 'Constant':
            ueb_request_data['SiteInitialConditions']['is_' + var + '_constant'] = True
            ueb_request_data['SiteInitialConditions'][var + '_constant_value'] = stage_6_data[var]
            # set data for confirmation page
            tk.c.ueb_input_section_data_items[section_name]['order'].append(var + '-use constant value:')
            tk.c.ueb_input_section_data_items[section_name][var + '-use constant value:'] = 'Yes '
            tk.c.ueb_input_section_data_items[section_name]['order'].append(var + '-constant value:')
            tk.c.ueb_input_section_data_items[section_name][var + '-constant value:'] = stage_6_data[var]
        elif stage_6_data[var + 'option'] == 'Compute':
            if var == 'latitude' or var == 'longitude':
                ueb_request_data['SiteInitialConditions']['is_' + var + '_derive_from_projection'] = True
            else:
                ueb_request_data['SiteInitialConditions']['is_' + var + '_derive_from_elevation'] = True
            tk.c.ueb_input_section_data_items[section_name]['order'].append(var + '-derive from domain elevation data:')
            tk.c.ueb_input_section_data_items[section_name][var + '-derive from domain elevation data:'] = 'Yes '
        else:
            selected_file_ids[var + '_grid_file_id'] = stage_6_data[var + 'gridfile']
            ueb_request_data['SiteInitialConditions'][var + '_grid_file_name'] = \
                _get_file_name_from_file_id(stage_6_data[var + 'gridfile'])
            ueb_request_data['SiteInitialConditions'][var + '_grid_file_format'] = stage_6_data[var + 'gridfileformat']
            # set data for confirmation page
            tk.c.ueb_input_section_data_items[section_name]['order'].append(var + '-use constant value:')
            tk.c.ueb_input_section_data_items[section_name][var + '-use constant value:'] = 'No '
            tk.c.ueb_input_section_data_items[section_name]['order'].append(var + '-derive from elevation data:')
            tk.c.ueb_input_section_data_items[section_name][var + '-derive from elevation data:'] = 'No '
            tk.c.ueb_input_section_data_items[section_name]['order'].append(var + '-grid file name:')
            tk.c.ueb_input_section_data_items[section_name][var + '-grid file name:'] = \
                ueb_request_data['SiteInitialConditions'][var + '_grid_file_name']
            tk.c.ueb_input_section_data_items[section_name]['order'].append(var + '-grid file format:')
            # BUG FIX: was assigned to the '-grid file name:' key (see stage 3)
            tk.c.ueb_input_section_data_items[section_name][var + '-grid file format:'] =\
                stage_6_data[var + 'gridfileformat']
    # --- stage 7: monthly Bristow-Campbell B values ---
    stage_7_data = session.get('stage_7', None)
    if not stage_7_data:
        # get the default data
        stage_7_data = _get_default_data_stage_seven()
    # set data for confirmation page
    # NOTE: the 'dirunal' typo is kept intentionally; the section title string
    # may be referenced elsewhere (e.g. templates) - confirm before fixing.
    section_name = 'Site initial condition - monthly mean dirunal temperature setup'
    tk.c.ueb_input_sections.append(section_name)
    tk.c.ueb_input_section_data_items[section_name] = {}
    tk.c.ueb_input_section_data_items[section_name]['order'] = []
    ueb_request_data['BristowCambellBValues'] = {}
    # stage_7_data maps lowercase month names to B values; request keys are
    # 'b01'..'b12'
    for key_month, value_temp in stage_7_data.items():
        var = 'b' + _convert_month_name_to_month_number(key_month)
        ueb_request_data['BristowCambellBValues'][var] = value_temp
    month_names = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September',
                   'October', 'November', 'December']
    for month in month_names:
        # reuse the month-name helper instead of a duplicated local month dict
        var = 'b' + _convert_month_name_to_month_number(month.lower())
        tk.c.ueb_input_section_data_items[section_name]['order'].append(month + ':')
        tk.c.ueb_input_section_data_items[section_name][month + ':'] = ueb_request_data['BristowCambellBValues'][var]
    # --- stage 8: time series variables ---
    stage_8_data = session.get('stage_8', None)
    if not stage_8_data:
        # get the default data
        stage_8_data = _get_default_data_stage_eight()
    # set data for confirmation page
    section_name = 'Time series variables setup'
    tk.c.ueb_input_sections.append(section_name)
    tk.c.ueb_input_section_data_items[section_name] = {}
    tk.c.ueb_input_section_data_items[section_name]['order'] = []
    ueb_request_data['TimeSeriesInputs'] = {}
    stage_8_variables = ['ta', 'prec', 'v', 'rh', 'snowalb', 'qg']
    for var in stage_8_variables:
        ueb_request_data['TimeSeriesInputs'][var + '_constant_value'] = 0
        ueb_request_data['TimeSeriesInputs'][var + '_text_file_name'] = ''
        ueb_request_data['TimeSeriesInputs'][var + '_grid_file_name'] = ''
        ueb_request_data['TimeSeriesInputs'][var + '_grid_file_format'] = ''
        ueb_request_data['TimeSeriesInputs']['is_' + var + '_constant'] = False
        tk.c.ueb_input_section_data_items[section_name]['order'].append(var + '-compute:')
        if var != 'qg':
            # ground heat flux (qg) has no 'Compute' option
            ueb_request_data['TimeSeriesInputs']['is_' + var + '_compute'] = False
            # set data for confirmation page
            tk.c.ueb_input_section_data_items[section_name][var + '-compute:'] = 'No'
        if stage_8_data[var + 'option'] == 'Constant':
            ueb_request_data['TimeSeriesInputs']['is_' + var + '_constant'] = True
            ueb_request_data['TimeSeriesInputs'][var + '_constant_value'] = stage_8_data[var]
            tk.c.ueb_input_section_data_items[section_name][var + '-compute:'] = 'No'
            tk.c.ueb_input_section_data_items[section_name]['order'].append(var + '-use constant value:')
            tk.c.ueb_input_section_data_items[section_name][var + '-use constant value:'] = 'Yes'
            tk.c.ueb_input_section_data_items[section_name]['order'].append(var + '-constant value:')
            tk.c.ueb_input_section_data_items[section_name][var + '-constant value:'] = stage_8_data[var]
        elif stage_8_data[var + 'option'] == 'Compute':
            ueb_request_data['TimeSeriesInputs']['is_' + var + '_compute'] = True
            tk.c.ueb_input_section_data_items[section_name][var + '-compute:'] = 'Yes'
        elif stage_8_data[var + 'option'] == 'Text':
            selected_file_ids[var + '_text_file_id'] = stage_8_data[var + 'textfile']
            ueb_request_data['TimeSeriesInputs'][var + '_text_file_name'] = \
                _get_file_name_from_file_id(stage_8_data[var + 'textfile'])
            tk.c.ueb_input_section_data_items[section_name][var + '-compute:'] = 'No'
            tk.c.ueb_input_section_data_items[section_name]['order'].append(var + '-use constant value:')
            tk.c.ueb_input_section_data_items[section_name][var + '-use constant value:'] = 'No'
            tk.c.ueb_input_section_data_items[section_name]['order'].append(var + '-use an input text file:')
            tk.c.ueb_input_section_data_items[section_name][var + '-use an input text file:'] = 'Yes'
            tk.c.ueb_input_section_data_items[section_name]['order'].append(var + '-text file name:')
            tk.c.ueb_input_section_data_items[section_name][var + '-text file name:'] = \
                ueb_request_data['TimeSeriesInputs'][var + '_text_file_name']
        else:
            selected_file_ids[var + '_grid_file_id'] = stage_8_data[var + 'gridfile']
            ueb_request_data['TimeSeriesInputs'][var + '_grid_file_name'] = \
                _get_file_name_from_file_id(stage_8_data[var + 'gridfile'])
            ueb_request_data['TimeSeriesInputs'][var + '_grid_file_format'] = stage_8_data[var + 'gridfileformat']
            tk.c.ueb_input_section_data_items[section_name]['order'].append(var + '-use a input grid file:')
            tk.c.ueb_input_section_data_items[section_name][var + '-use a input grid file:'] = 'Yes'
            tk.c.ueb_input_section_data_items[section_name]['order'].append(var + '-grid file name:')
            tk.c.ueb_input_section_data_items[section_name][var + '-grid file name:'] =\
                ueb_request_data['TimeSeriesInputs'][var + '_grid_file_name']
            tk.c.ueb_input_section_data_items[section_name]['order'].append(var + '-grid file format:')
            # BUG FIX: was assigned to the '-grid file name:' key (see stage 3)
            tk.c.ueb_input_section_data_items[section_name][var + '-grid file format:'] =\
                stage_8_data[var + 'gridfileformat']
    # --- stage 9: output variables ---
    stage_9_data = session.get('stage_9', None)
    if not stage_9_data:
        # get the default data
        stage_9_data = _get_default_data_stage_nine()
    # set data for confirmation page
    section_name = 'Output variables setup'
    tk.c.ueb_input_sections.append(section_name)
    tk.c.ueb_input_section_data_items[section_name] = {}
    tk.c.ueb_input_section_data_items[section_name]['order'] = []
    default_output_control_file_name = 'outputcontrol.dat'
    default_aggr_output_control_file_name = 'aggregatedoutputcontrol.dat'
    tk.c.ueb_input_section_data_items[section_name]['order'].append('Use default output control file:')
    tk.c.ueb_input_section_data_items[section_name]['order'].append('Use default aggregated output control file:')
    if stage_9_data['outputControlFileOption'] == 'Yes':
        selected_file_ids['outputcontrol_file_id'] = _get_file_id_from_file_name(ueb_default_input_dataset_id,
                                                                                 default_output_control_file_name)
        ueb_request_data['OutputControlFileName'] = default_output_control_file_name
        tk.c.ueb_input_section_data_items[section_name]['Use default output control file:'] = 'Yes'
    else:
        selected_file_ids['outputcontrol_file_id'] = stage_9_data['outputControlFile']
        ueb_request_data['OutputControlFileName'] =\
            _get_file_name_from_file_id(selected_file_ids['outputcontrol_file_id'])
        tk.c.ueb_input_section_data_items[section_name]['Use default output control file:'] = 'No'
        tk.c.ueb_input_section_data_items[section_name]['order'].append('Output control file name:')
        tk.c.ueb_input_section_data_items[section_name]['Output control file name:'] =\
            ueb_request_data['OutputControlFileName']
    if stage_9_data['aggrOutputControlFileOption'] == 'Yes':
        selected_file_ids['aggroutputcontrol_file_id'] = _get_file_id_from_file_name(
            ueb_default_input_dataset_id, default_aggr_output_control_file_name)
        ueb_request_data['AggregatedOutputControlFileName'] = default_aggr_output_control_file_name
        tk.c.ueb_input_section_data_items[section_name]['Use default aggregated output control file:'] = 'Yes'
    else:
        selected_file_ids['aggroutputcontrol_file_id'] = stage_9_data['aggrOutputControlFile']
        ueb_request_data['AggregatedOutputControlFileName'] = _get_file_name_from_file_id(
            selected_file_ids['aggroutputcontrol_file_id'])
        # BUG FIX: this non-default branch previously displayed 'Yes'
        tk.c.ueb_input_section_data_items[section_name]['Use default aggregated output control file:'] = 'No'
        tk.c.ueb_input_section_data_items[section_name]['order'].append('Aggregated output control file name:')
        tk.c.ueb_input_section_data_items[section_name]['Aggregated output control file name:'] =\
            ueb_request_data['AggregatedOutputControlFileName']
    ueb_req_json = json.dumps(ueb_request_data)
    ueb_request = {'selected_file_ids': selected_file_ids, 'ueb_req_json': ueb_req_json}
    return ueb_request
def _convert_month_name_to_month_number(month):
months = {'january': '01', 'february': '02', 'march': '03', 'april': '04', 'may': '05', 'june': '06', 'july': '07',
'august': '08', 'september': '09', 'october': '10', 'november': '11', 'december': '12' }
return months.get(month, '00')
def _save_ueb_request_as_dataset(pkgname, selected_file_ids, selected_user_org):
    """
    create a new ckan package/datatset of dataset type 'model-configuration'
    with package title as the name of the ueb model package request
    save the package request information from the c object to a text file to a temporary dir
    and then add the file as a resource to the package created. Also add all other files
    selected as inputs to build package as a resource in the same dataset

    param pkgname: name of the package as entered by the user
    param selected_file_ids: a dict object containing name of files with their corresponding resource ids
    param selected_user_org: id of the user selected organization for which a new dataset will be created
    return: id of the new dataset that was created or None
    """
    source = 'uebpackage.packagecreate._save_ueb_request_as_dataset():'
    ckan_default_dir = uebhelper.StringSettings.ckan_user_session_temp_dir

    # create a directory for saving the file
    # this will be a dir in the form of: /tmp/ckan/{session id}/files
    #destination_dir = os.path.join(ckan_default_dir, base.session.id, 'files')
    try:
        destination_dir = os.path.join(ckan_default_dir, base.session.id)
        # start from a clean per-session directory so files from an earlier
        # request never leak into this one
        if os.path.isdir(destination_dir):
            shutil.rmtree(destination_dir)
        os.makedirs(destination_dir)
    except Exception as e:
        log.error(source + 'Failed to create temporary dir for shapefile: %s \n Exception: %s' % (destination_dir, e))
        return None

    ueb_request_text_file_name = uebhelper.StringSettings.ueb_request_text_resource_file_name
    request_resource_file = os.path.join(destination_dir, ueb_request_text_file_name)
    try:
        # serialize the request data held on the template context (tk.c) into
        # a plain-text file: one section header per block, tab-indented items
        with open(request_resource_file, 'w') as file_obj:
            for section in tk.c.ueb_input_sections:
                file_obj.write(section + ':\n')
                section_data_items = tk.c.ueb_input_section_data_items[section]
                # the 'order' key preserves the display ordering of data items
                for key in section_data_items['order']:
                    data_item_line_to_write = '\t' + key + '%s' % section_data_items[key] + '\n'
                    file_obj.write(data_item_line_to_write)
                file_obj.write('\n')
    except Exception as e:
        log.error(source + 'Failed to save the ueb_package request file to temporary location'
                           ' for package: %s \n Exception: %s' % (pkgname, e))
        return None

    # create a package
    package_create_action = tk.get_action('package_create')

    # create unique package name using the current time stamp as a postfix to any package name
    unique_postfix = datetime.now().isoformat().replace(':', '-').replace('.', '-').lower()
    pkg_title = pkgname  # + '_'
    data_dict = {
        'name': 'model_configuration_' + unique_postfix,  # this needs to be unique as required by DB
        'type': 'model-configuration',  # dataset type as defined in custom dataset plugin
        'title': pkg_title,
        'owner_org': selected_user_org,
        'author': tk.c.user or tk.c.author,  # TODO: Need to retrieve user full name
        'notes': 'This is a dataset consisting of UEB model configuration related resources',
        'processing_status': 'In queue',
        'package_build_request_job_id': '',
        'model_name': 'UEB',
        'package_availability': uebhelper.StringSettings.app_server_job_status_package_not_available
    }
    context = {'model': base.model, 'session': base.model.Session, 'user': tk.c.user or tk.c.author, 'save': 'save'}
    try:
        pkg_dict = package_create_action(context, data_dict)
        log.info(source + 'A new dataset was created with name: %s' % data_dict['title']
                 + ' as part of ueb package build request.')
    except Exception as e:
        log.error(source + 'Failed to create a new dataset for ueb_package request for'
                           ' package name: %s \n Exception: %s' % (pkgname, e))
        return None

    pkg_id = pkg_dict['id']
    # add the uploaded ueb request data file as a resource to the above dataset
    try:
        _add_file_to_dataset(context, pkg_dict, request_resource_file)
    except Exception as e:
        log.error(source + ' Failed to update the newly created model-configuration dataset for adding package '
                           'configuration file as a resource.\n Exception: %s' % e)
        # roll back: a dataset without its request file is not usable
        tk.get_action('package_delete')(context, {'id': pkg_id})
        log.info(source + ' Deleting the newly created dataset')
        return None

    # for each file id for the user selected files, get the file
    # object and add that to the package/dataset as a link resource
    resource_show_action = tk.get_action('resource_show')
    for file_id in selected_file_ids.values():
        # get the resource that has the id equal to the given resource id
        data_dict = {'id': file_id}
        resource_metadata = resource_show_action(context, data_dict)
        resource_url = resource_metadata.get('url')
        resource_name = resource_metadata.get('name')
        resource_desc = resource_metadata.get('description')
        resource_format = resource_metadata.get('format')
        resource_created_date = resource_metadata.get('_creation_date')
        resource_size = resource_metadata.get('_content_length')
        data_dict = {
            "url": resource_url,
            "name": resource_name,
            "created": resource_created_date,
            "format": resource_format,
            "size": resource_size,
            "description": resource_desc,
            "resource_type": 'file.link',
            "url_type": 'link'
        }
        try:
            # re-read the package each iteration so resources added by the
            # previous iteration are not lost by updating a stale pkg_dict
            pkg_dict = uebhelper.get_package(pkg_id)
            _add_resource_link_to_dataset(context, pkg_dict, data_dict)
        except Exception as e:
            log.error(source + 'Failed to add resource links to the dataset as part of the user selected'
                               ' files for package name: %s \n Exception: %s' % (pkgname, e))
            tk.get_action('package_delete')(context, {'id': pkg_id})
            log.info(source + ' Deleting the newly created dataset')
            return None

    return pkg_id
def _add_file_to_dataset(context, pkg_dict, file_to_add):
    """Upload a local file as a new resource on an existing dataset.

    param context: ckan action context (model/session/user)
    param pkg_dict: package dict of the dataset to update
    param file_to_add: path of the local file to upload as a resource
    """
    if 'resources' not in pkg_dict:
        pkg_dict['resources'] = []

    file_name = os.path.basename(file_to_add)
    file_name = munge.munge_filename(file_name)
    # only the extension is needed; discard the path/stem part
    _, file_extension = os.path.splitext(file_to_add)
    # remove the dot from the extension string '.txt' -> 'txt'
    file_extension = file_extension[1:]
    resource = {'url': file_name, 'url_type': 'upload'}
    upload = uploader.ResourceUpload(resource)
    upload.filename = file_name
    # NOTE(review): the handle is handed off to ResourceUpload; presumably
    # closed by upload.upload() below -- confirm before adding a close() here
    upload.upload_file = open(file_to_add, 'r')
    # FIXME The url type setting here to file name is not appropriate. We need to pass the actual resource url
    data_dict = {
        'name': file_name,
        'description': 'UEB configuration settings',
        'format': file_extension,
        'url': file_name,
        'resource_type': 'file.upload',
        'url_type': 'upload'
    }
    pkg_dict['resources'].append(data_dict)
    context['defer_commit'] = True
    context['use_cache'] = False
    # update the package
    package_update_action = tk.get_action('package_update')
    package_update_action(context, pkg_dict)
    context.pop('defer_commit')
    # Get out resource_id resource from model as it will not appear in
    # package_show until after commit
    upload.upload(context['package'].resources[-1].id, uploader.get_max_resource_size())
    base.model.repo.commit()
def _add_resource_link_to_dataset(context, pkg_dict, resource_dict):
    """Attach an already-stored resource to a dataset as a link-type resource.

    param context: ckan action context (model/session/user)
    param pkg_dict: package dict of the dataset to update
    param resource_dict: dict describing the link resource to append
    """
    if 'resources' not in pkg_dict:
        pkg_dict['resources'] = []

    pkg_dict['resources'].append(resource_dict)
    context['defer_commit'] = True
    context['use_cache'] = False
    # update the package
    package_update_action = tk.get_action('package_update')
    package_update_action(context, pkg_dict)
    context.pop('defer_commit')
    base.model.repo.commit()
| bsd-3-clause |
adamchainz/ansible | test/runner/lib/util.py | 27 | 14358 | """Miscellaneous utility functions and classes."""
from __future__ import absolute_import, print_function
import errno
import os
import pipes
import pkgutil
import shutil
import subprocess
import re
import sys
import time
def is_shippable():
    """Report whether we are running under the Shippable CI service.

    :rtype: bool
    """
    shippable_flag = os.environ.get('SHIPPABLE')
    return shippable_flag == 'true'
def remove_file(path):
    """Delete the given file if it exists; anything else is left untouched.

    :type path: str
    """
    if not os.path.isfile(path):
        return
    os.remove(path)
def find_executable(executable, cwd=None, path=None, required=True):
    """Locate an executable, mimicking shell PATH lookup.

    :type executable: str
    :type cwd: str
    :type path: str
    :type required: bool | str
    :rtype: str | None
    """
    match = None
    real_cwd = os.getcwd()

    if not cwd:
        cwd = real_cwd

    if os.path.dirname(executable):
        # explicit relative/absolute path: check it directly against cwd
        target = os.path.join(cwd, executable)
        if os.path.exists(target) and os.access(target, os.F_OK | os.X_OK):
            # NOTE: returns the name as given, not the joined target path
            match = executable
    else:
        if path is None:
            path = os.environ.get('PATH', os.defpath)

        if path:
            path_dirs = path.split(os.pathsep)
            seen_dirs = set()

            for path_dir in path_dirs:
                # skip duplicate PATH entries
                if path_dir in seen_dirs:
                    continue

                seen_dirs.add(path_dir)

                # substitute the caller-supplied cwd for the real working dir
                if os.path.abspath(path_dir) == real_cwd:
                    path_dir = cwd

                candidate = os.path.join(path_dir, executable)

                if os.path.exists(candidate) and os.access(candidate, os.F_OK | os.X_OK):
                    match = candidate
                    break

    if not match and required:
        message = 'Required program "%s" not found.' % executable

        # required='warning' downgrades the failure to a warning
        if required != 'warning':
            raise ApplicationError(message)

        display.warning(message)

    return match
def run_command(args, cmd, capture=False, env=None, data=None, cwd=None, always=False, stdin=None, stdout=None,
                cmd_verbosity=1):
    """Run a command, honoring the --explain (dry-run) setting from *args*.

    :type args: CommonConfig
    :type cmd: collections.Iterable[str]
    :type capture: bool
    :type env: dict[str, str] | None
    :type data: str | None
    :type cwd: str | None
    :type always: bool
    :type stdin: file | None
    :type stdout: file | None
    :type cmd_verbosity: int
    :rtype: str | None, str | None
    """
    # always=True forces execution even in --explain mode
    forwarded = dict(
        capture=capture,
        env=env,
        data=data,
        cwd=cwd,
        explain=args.explain and not always,
        stdin=stdin,
        stdout=stdout,
        cmd_verbosity=cmd_verbosity,
    )
    return raw_command(cmd, **forwarded)
def raw_command(cmd, capture=False, env=None, data=None, cwd=None, explain=False, stdin=None, stdout=None,
                cmd_verbosity=1):
    """Execute a command and return its decoded stdout/stderr (when captured).

    :type cmd: collections.Iterable[str]
    :type capture: bool
    :type env: dict[str, str] | None
    :type data: str | None
    :type cwd: str | None
    :type explain: bool
    :type stdin: file | None
    :type stdout: file | None
    :type cmd_verbosity: int
    :rtype: str | None, str | None
    """
    if not cwd:
        cwd = os.getcwd()

    if not env:
        env = common_environment()

    cmd = list(cmd)

    escaped_cmd = ' '.join(pipes.quote(c) for c in cmd)

    display.info('Run command: %s' % escaped_cmd, verbosity=cmd_verbosity)
    display.info('Working directory: %s' % cwd, verbosity=2)

    # required='warning' so the Popen OSError below remains the authoritative
    # failure path for a missing program
    program = find_executable(cmd[0], cwd=cwd, path=env['PATH'], required='warning')

    if program:
        display.info('Program found: %s' % program, verbosity=2)

    for key in sorted(env.keys()):
        display.info('%s=%s' % (key, env[key]), verbosity=2)

    if explain:
        # dry-run: report what would run without executing anything
        return None, None

    # communicate tracks whether process.communicate() (vs wait()) is needed
    communicate = False

    if stdin is not None:
        # an explicit stdin stream wins over string data
        data = None
        communicate = True
    elif data is not None:
        stdin = subprocess.PIPE
        communicate = True

    if stdout:
        communicate = True

    if capture:
        stdout = stdout or subprocess.PIPE
        stderr = subprocess.PIPE
        communicate = True
    else:
        stderr = None

    start = time.time()

    try:
        process = subprocess.Popen(cmd, env=env, stdin=stdin, stdout=stdout, stderr=stderr, cwd=cwd)
    except OSError as ex:
        if ex.errno == errno.ENOENT:
            raise ApplicationError('Required program "%s" not found.' % cmd[0])
        raise

    if communicate:
        encoding = 'utf-8'
        data_bytes = data.encode(encoding) if data else None
        stdout_bytes, stderr_bytes = process.communicate(data_bytes)
        stdout_text = stdout_bytes.decode(encoding) if stdout_bytes else u''
        stderr_text = stderr_bytes.decode(encoding) if stderr_bytes else u''
    else:
        process.wait()
        stdout_text, stderr_text = None, None

    status = process.returncode
    runtime = time.time() - start

    display.info('Command exited with status %s after %s seconds.' % (status, runtime), verbosity=4)

    if status == 0:
        return stdout_text, stderr_text

    raise SubprocessError(cmd, status, stdout_text, stderr_text, runtime)
def common_environment():
    """Common environment used for executing all programs."""
    # start from a minimal, deterministic environment and pull in only the
    # variables the tools actually need
    base_env = {
        'LC_ALL': 'en_US.UTF-8',
        'PATH': os.environ.get('PATH', os.defpath),
    }
    base_env.update(pass_vars(
        required=('HOME',),
        optional=('HTTPTESTER', 'SSH_AUTH_SOCK'),
    ))
    return base_env
def pass_vars(required, optional):
    """Collect environment variables to pass through to child processes.

    :type required: collections.Iterable[str]
    :type optional: collections.Iterable[str]
    :rtype: dict[str, str]
    :raises MissingEnvironmentVariable: when a required variable is unset
    """
    env = {}

    for name in required:
        if name not in os.environ:
            raise MissingEnvironmentVariable(name)
        env[name] = os.environ[name]

    # optional variables are copied only when present
    for name in optional:
        if name in os.environ:
            env[name] = os.environ[name]

    return env
def deepest_path(path_a, path_b):
    """Return the deepest of two paths, or None if the paths are unrelated.

    :type path_a: str
    :type path_b: str
    :rtype: str | None
    """
    # '.' is normalized to '' so prefix tests treat it as the root
    first = '' if path_a == '.' else path_a
    second = '' if path_b == '.' else path_b

    if first.startswith(second):
        return first or '.'

    if second.startswith(first):
        return second or '.'

    return None
def remove_tree(path):
    """Recursively delete *path*, silently ignoring a missing directory.

    :type path: str
    """
    try:
        shutil.rmtree(path)
    except OSError as ex:
        if ex.errno == errno.ENOENT:
            return  # already gone -- nothing to do
        raise
def make_dirs(path):
    """Create *path* (and any missing parents), tolerating an existing path.

    :type path: str
    """
    try:
        os.makedirs(path)
    except OSError as ex:
        if ex.errno == errno.EEXIST:
            return  # already present -- nothing to do
        raise
def is_binary_file(path):
    """Heuristically detect a binary file by scanning its first KiB for NUL bytes.

    :type path: str
    :rtype: bool
    """
    with open(path, 'rb') as handle:
        head = handle.read(1024)
    return head.find(b'\0') >= 0
class Display(object):
    """Manages color console output."""

    # ANSI escape sequences shared by all instances
    clear = '\033[0m'
    red = '\033[31m'
    green = '\033[32m'
    yellow = '\033[33m'
    blue = '\033[34m'
    purple = '\033[35m'
    cyan = '\033[36m'

    # color applied to info() output per verbosity level; levels above 3
    # fall back to yellow
    verbosity_colors = {
        0: None,
        1: green,
        2: blue,
        3: cyan,
    }

    def __init__(self):
        self.verbosity = 0  # suppress info() messages above this level
        self.color = True  # emit ANSI colors when True
        self.warnings = []  # every warning shown, replayed by review_warnings()
        self.warnings_unique = set()  # messages already shown with unique=True
        self.info_stderr = False  # route info() to stderr instead of stdout

    def __warning(self, message):
        """Emit a purple WARNING line on stderr.

        :type message: str
        """
        self.print_message('WARNING: %s' % message, color=self.purple, fd=sys.stderr)

    def review_warnings(self):
        """Review all warnings which previously occurred."""
        if not self.warnings:
            return

        self.__warning('Reviewing previous %d warning(s):' % len(self.warnings))

        for earlier in self.warnings:
            self.__warning(earlier)

    def warning(self, message, unique=False):
        """Show a warning, optionally suppressing duplicate messages.

        :type message: str
        :type unique: bool
        """
        if unique:
            if message in self.warnings_unique:
                return
            self.warnings_unique.add(message)

        self.__warning(message)
        self.warnings.append(message)

    def notice(self, message):
        """Show a purple NOTICE line on stderr.

        :type message: str
        """
        self.print_message('NOTICE: %s' % message, color=self.purple, fd=sys.stderr)

    def error(self, message):
        """Show a red ERROR line on stderr.

        :type message: str
        """
        self.print_message('ERROR: %s' % message, color=self.red, fd=sys.stderr)

    def info(self, message, verbosity=0):
        """Show an informational message when the configured verbosity allows.

        :type message: str
        :type verbosity: int
        """
        if self.verbosity < verbosity:
            return

        shade = self.verbosity_colors.get(verbosity, self.yellow)
        target = sys.stderr if self.info_stderr else sys.stdout
        self.print_message(message, color=shade, fd=target)

    def print_message(self, message, color=None, fd=sys.stdout):  # pylint: disable=locally-disabled, invalid-name
        """Write a (possibly colored) line to *fd* and flush it.

        :type message: str
        :type color: str | None
        :type fd: file
        """
        if color and self.color:
            # re-color embedded resets so the entire line stays one color
            message = '%s%s%s' % (color, message.replace(self.clear, color), self.clear)

        print(message, file=fd)
        fd.flush()
class ApplicationError(Exception):
    """General application error."""
class ApplicationWarning(Exception):
    """General application warning which interrupts normal program flow."""
class SubprocessError(ApplicationError):
    """Error resulting from failed subprocess execution."""
    def __init__(self, cmd, status=0, stdout=None, stderr=None, runtime=None):
        """Build a message embedding the command, exit status and any output.

        :type cmd: list[str]
        :type status: int
        :type stdout: str | None
        :type stderr: str | None
        :type runtime: float | None
        """
        quoted = ' '.join(pipes.quote(c) for c in cmd)
        parts = ['Command "%s" returned exit status %s.\n' % (quoted, status)]

        if stderr:
            parts.append('>>> Standard Error\n')
            # Display.clear terminates any stray ANSI coloring in the output
            parts.append('%s%s\n' % (stderr.strip(), Display.clear))

        if stdout:
            parts.append('>>> Standard Output\n')
            parts.append('%s%s\n' % (stdout.strip(), Display.clear))

        super(SubprocessError, self).__init__(''.join(parts).strip())

        self.cmd = cmd
        self.status = status
        self.stdout = stdout
        self.stderr = stderr
        self.runtime = runtime
class MissingEnvironmentVariable(ApplicationError):
    """Error caused by missing environment variable."""
    def __init__(self, name):
        """
        :type name: str
        """
        message = 'Missing environment variable: %s' % name
        super(MissingEnvironmentVariable, self).__init__(message)
        self.name = name
class CommonConfig(object):
    """Configuration common to all commands."""
    def __init__(self, args):
        """Copy the flags shared by every command off the parsed-args namespace.

        :type args: any
        """
        self.color = getattr(args, 'color')  # type: bool
        self.explain = getattr(args, 'explain')  # type: bool
        self.verbosity = getattr(args, 'verbosity')  # type: int
        self.debug = getattr(args, 'debug')  # type: bool
class EnvironmentConfig(CommonConfig):
    """Configuration common to all commands which execute in an environment."""
    def __init__(self, args, command):
        """
        :type args: any
        :type command: str
        """
        super(EnvironmentConfig, self).__init__(args)

        self.command = command

        self.local = args.local is True

        # --tox may be given as a bare flag (bool/None) or with an explicit
        # python version string; normalize both forms here
        if args.tox is True or args.tox is False or args.tox is None:
            self.tox = args.tox is True
            self.tox_args = 0
            self.python = args.python if 'python' in args else None  # type: str
        else:
            self.tox = True
            self.tox_args = 1
            self.python = args.tox  # type: str

        self.docker = docker_qualify_image(args.docker)  # type: str
        self.remote = args.remote  # type: str

        # the options below exist only for some sub-commands, hence the
        # membership checks against the args namespace
        self.docker_privileged = args.docker_privileged if 'docker_privileged' in args else False  # type: bool
        self.docker_util = docker_qualify_image(args.docker_util if 'docker_util' in args else '')  # type: str
        self.docker_pull = args.docker_pull if 'docker_pull' in args else False  # type: bool

        self.tox_sitepackages = args.tox_sitepackages  # type: bool

        self.remote_stage = args.remote_stage  # type: str
        self.remote_aws_region = args.remote_aws_region  # type: str
        self.remote_terminate = args.remote_terminate  # type: str

        self.requirements = args.requirements  # type: bool

        if self.python == 'default':
            self.python = '.'.join(str(i) for i in sys.version_info[:2])

        self.python_version = self.python or '.'.join(str(i) for i in sys.version_info[:2])

        self.delegate = self.tox or self.docker or self.remote

        if self.delegate:
            # delegated environments must install their own requirements
            self.requirements = True
def docker_qualify_image(name):
    """Expand a bare image name to the ansible/ansible repository; names that
    are empty or already carry a repository ('/') or tag (':') pass through.

    :type name: str
    :rtype: str
    """
    if not name:
        return name

    if '/' in name or ':' in name:
        return name

    return 'ansible/ansible:%s' % name
def parse_to_dict(pattern, value):
    """Match *value* against a pattern with named groups and return the groups.

    :type pattern: str
    :type value: str
    :return: dict[str, str]
    """
    found = re.search(pattern, value)

    if found:
        return found.groupdict()

    raise Exception('Pattern "%s" did not match value: %s' % (pattern, value))
def get_subclasses(class_type):
    """Recursively collect every (direct and indirect) subclass of *class_type*.

    :type class_type: type
    :rtype: set[type]
    """
    # breadth-style traversal over the subclass tree; the seen-set guards
    # against revisiting classes reachable via multiple inheritance
    subclasses = set()
    queue = [class_type]

    while queue:
        parent = queue.pop()

        for child in parent.__subclasses__():
            if child not in subclasses:
                subclasses.add(child)
                queue.append(child)

    return subclasses
def import_plugins(directory):
    """Import every module found in the given plugin directory.

    :type directory: str
    """
    path = os.path.join(os.path.dirname(__file__), directory)
    prefix = 'lib.%s.' % directory

    # importing each module registers its classes for later discovery via
    # __subclasses__()
    for _, module_name, _ in pkgutil.iter_modules([path], prefix=prefix):
        __import__(module_name)
def load_plugins(base_type, database):
    """Register each subclass of *base_type* in *database*, keyed by the third
    component of its module path (the plugin name).

    :type base_type: type
    :type database: dict[str, type]
    """
    for subclass in get_subclasses(base_type):
        plugin_name = subclass.__module__.split('.')[2]
        database[plugin_name] = subclass
# module-level console-output singleton shared by the helpers above
display = Display()  # pylint: disable=locally-disabled, invalid-name
| gpl-3.0 |
jymannob/CouchPotatoServer | libs/suds/client.py | 150 | 25971 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{2nd generation} service proxy provides access to web services.
See I{README.txt}
"""
import suds
import suds.metrics as metrics
from cookielib import CookieJar
from suds import *
from suds.reader import DefinitionsReader
from suds.transport import TransportError, Request
from suds.transport.https import HttpAuthenticated
from suds.servicedefinition import ServiceDefinition
from suds import sudsobject
from sudsobject import Factory as InstFactory
from sudsobject import Object
from suds.resolver import PathResolver
from suds.builder import Builder
from suds.wsdl import Definitions
from suds.cache import ObjectCache
from suds.sax.document import Document
from suds.sax.parser import Parser
from suds.options import Options
from suds.properties import Unskin
from urlparse import urlparse
from copy import deepcopy
from suds.plugin import PluginContainer
from logging import getLogger
log = getLogger(__name__)
class Client(object):
    """
    A lightweight web services client.
    I{(2nd generation)} API.
    @ivar wsdl: The WSDL object.
    @type wsdl:L{Definitions}
    @ivar service: The service proxy used to invoke operations.
    @type service: L{Service}
    @ivar factory: The factory used to create objects.
    @type factory: L{Factory}
    @ivar sd: The service definition
    @type sd: L{ServiceDefinition}
    @ivar messages: The last sent/received messages.
    @type messages: str[2]
    """
    # NOTE: Python 2 module (uses `unicode`); keep py2 semantics when editing.

    @classmethod
    def items(cls, sobject):
        """
        Extract the I{items} from a suds object much like the
        items() method works on I{dict}.
        @param sobject: A suds object
        @type sobject: L{Object}
        @return: A list of items contained in I{sobject}.
        @rtype: [(key, value),...]
        """
        return sudsobject.items(sobject)

    @classmethod
    def dict(cls, sobject):
        """
        Convert a sudsobject into a dictionary.
        @param sobject: A suds object
        @type sobject: L{Object}
        @return: A python dictionary containing the
        items contained in I{sobject}.
        @rtype: dict
        """
        return sudsobject.asdict(sobject)

    @classmethod
    def metadata(cls, sobject):
        """
        Extract the metadata from a suds object.
        @param sobject: A suds object
        @type sobject: L{Object}
        @return: The object's metadata
        @rtype: L{sudsobject.Metadata}
        """
        return sobject.__metadata__

    def __init__(self, url, **kwargs):
        """
        @param url: The URL for the WSDL.
        @type url: str
        @param kwargs: keyword arguments.
        @see: L{Options}
        """
        options = Options()
        options.transport = HttpAuthenticated()
        self.options = options
        # cache default is set before user kwargs are applied below, so a
        # caller-supplied 'cache' option still wins in set_options()
        options.cache = ObjectCache(days=1)
        self.set_options(**kwargs)
        reader = DefinitionsReader(options, Definitions)
        self.wsdl = reader.open(url)
        # notify plugins that the WSDL has been fully loaded
        plugins = PluginContainer(options.plugins)
        plugins.init.initialized(wsdl=self.wsdl)
        self.factory = Factory(self.wsdl)
        self.service = ServiceSelector(self, self.wsdl.services)
        self.sd = []
        for s in self.wsdl.services:
            sd = ServiceDefinition(self.wsdl, s)
            self.sd.append(sd)
        self.messages = dict(tx=None, rx=None)

    def set_options(self, **kwargs):
        """
        Set options.
        @param kwargs: keyword arguments.
        @see: L{Options}
        """
        p = Unskin(self.options)
        p.update(kwargs)

    def add_prefix(self, prefix, uri):
        """
        Add I{static} mapping of an XML namespace prefix to a namespace.
        This is useful for cases when a wsdl and referenced schemas make heavy
        use of namespaces and those namespaces are subject to changed.
        @param prefix: An XML namespace prefix.
        @type prefix: str
        @param uri: An XML namespace URI.
        @type uri: str
        @raise Exception: when prefix is already mapped.
        """
        root = self.wsdl.root
        mapped = root.resolvePrefix(prefix, None)
        if mapped is None:
            root.addPrefix(prefix, uri)
            return
        # re-mapping the same prefix to the same URI is tolerated
        if mapped[1] != uri:
            raise Exception('"%s" already mapped as "%s"' % (prefix, mapped))

    def last_sent(self):
        """
        Get last sent I{soap} message.
        @return: The last sent I{soap} message.
        @rtype: L{Document}
        """
        return self.messages.get('tx')

    def last_received(self):
        """
        Get last received I{soap} message.
        @return: The last received I{soap} message.
        @rtype: L{Document}
        """
        return self.messages.get('rx')

    def clone(self):
        """
        Get a shallow clone of this object.
        The clone only shares the WSDL. All other attributes are
        unique to the cloned object including options.
        @return: A shallow clone.
        @rtype: L{Client}
        """
        class Uninitialized(Client):
            # bypasses Client.__init__ so no WSDL download is triggered
            def __init__(self):
                pass
        clone = Uninitialized()
        clone.options = Options()
        cp = Unskin(clone.options)
        mp = Unskin(self.options)
        cp.update(deepcopy(mp))
        clone.wsdl = self.wsdl
        clone.factory = self.factory
        clone.service = ServiceSelector(clone, self.wsdl.services)
        clone.sd = self.sd
        clone.messages = dict(tx=None, rx=None)
        return clone

    def __str__(self):
        # Python 2 idiom: delegate to __unicode__
        return unicode(self)

    def __unicode__(self):
        s = ['\n']
        build = suds.__build__.split()
        s.append('Suds ( https://fedorahosted.org/suds/ )')
        s.append(' version: %s' % suds.__version__)
        s.append(' %s build: %s' % (build[0], build[1]))
        for sd in self.sd:
            s.append('\n\n%s' % unicode(sd))
        return ''.join(s)
class Factory:
    """
    A factory for instantiating types defined in the wsdl
    @ivar resolver: A schema type resolver.
    @type resolver: L{PathResolver}
    @ivar builder: A schema object builder.
    @type builder: L{Builder}
    """

    def __init__(self, wsdl):
        """
        @param wsdl: A schema object.
        @type wsdl: L{wsdl.Definitions}
        """
        self.wsdl = wsdl
        self.resolver = PathResolver(wsdl)
        self.builder = Builder(self.resolver)

    def create(self, name):
        """
        create a WSDL type by name
        @param name: The name of a type defined in the WSDL.
        @type name: str
        @return: The requested object.
        @rtype: L{Object}
        @raise TypeNotFound: when I{name} cannot be resolved.
        @raise BuildError: when building the resolved type fails.
        """
        timer = metrics.Timer()
        timer.start()
        type = self.resolver.find(name)
        if type is None:
            raise TypeNotFound(name)
        if type.enum():
            # enumerations: each attribute mirrors its own value name
            result = InstFactory.object(name)
            for e, a in type.children():
                setattr(result, e.name, e.name)
        else:
            try:
                result = self.builder.build(type)
            except Exception, e:  # Python 2-only except syntax
                log.error("create '%s' failed", name, exc_info=True)
                raise BuildError(name, e)
        timer.stop()
        metrics.log.debug('%s created: %s', name, timer)
        return result

    def separator(self, ps):
        """
        Set the path separator.
        @param ps: The new path separator.
        @type ps: char
        """
        self.resolver = PathResolver(self.wsdl, ps)
class ServiceSelector:
    """
    The B{service} selector is used to select a web service.
    In most cases, the wsdl only defines (1) service in which access
    by subscript is passed through to a L{PortSelector}. This is also the
    behavior when a I{default} service has been specified. In cases
    where multiple services have been defined and no default has been
    specified, the service is found by name (or index) and a L{PortSelector}
    for the service is returned. In all cases, attribute access is
    forwarded to the L{PortSelector} for either the I{first} service or the
    I{default} service (when specified).
    @ivar __client: A suds client.
    @type __client: L{Client}
    @ivar __services: A list of I{wsdl} services.
    @type __services: list
    """

    def __init__(self, client, services):
        """
        @param client: A suds client.
        @type client: L{Client}
        @param services: A list of I{wsdl} services.
        @type services: list
        """
        self.__client = client
        self.__services = services

    def __getattr__(self, name):
        """
        Request to access an attribute is forwarded to the
        L{PortSelector} for either the I{first} service or the
        I{default} service (when specified).
        @param name: The name of a method.
        @type name: str
        @return: A L{PortSelector}.
        @rtype: L{PortSelector}.
        """
        default = self.__ds()
        if default is None:
            port = self.__find(0)
        else:
            port = default
        return getattr(port, name)

    def __getitem__(self, name):
        """
        Provides selection of the I{service} by name (string) or
        index (integer). In cases where only (1) service is defined
        or a I{default} has been specified, the request is forwarded
        to the L{PortSelector}.
        @param name: The name (or index) of a service.
        @type name: (int|str)
        @return: A L{PortSelector} for the specified service.
        @rtype: L{PortSelector}.
        """
        # with a single (or default) service, the subscript selects a port,
        # not a service
        if len(self.__services) == 1:
            port = self.__find(0)
            return port[name]
        default = self.__ds()
        if default is not None:
            port = default
            return port[name]
        return self.__find(name)

    def __find(self, name):
        """
        Find a I{service} by name (string) or index (integer).
        @param name: The name (or index) of a service.
        @type name: (int|str)
        @return: A L{PortSelector} for the found service.
        @rtype: L{PortSelector}.
        @raise ServiceNotFound: when no service matches I{name}.
        """
        service = None
        if not len(self.__services):
            raise Exception, 'No services defined'
        if isinstance(name, int):
            try:
                service = self.__services[name]
                name = service.name
            except IndexError:
                raise ServiceNotFound, 'at [%d]' % name
        else:
            for s in self.__services:
                if name == s.name:
                    service = s
                    break
        if service is None:
            raise ServiceNotFound, name
        return PortSelector(self.__client, service.ports, name)

    def __ds(self):
        """
        Get the I{default} service if defined in the I{options}.
        @return: A L{PortSelector} for the I{default} service.
        @rtype: L{PortSelector}.
        """
        ds = self.__client.options.service
        if ds is None:
            return None
        else:
            return self.__find(ds)
class PortSelector:
    """
    The B{port} selector is used to select a I{web service} B{port}.
    In cases where multiple ports have been defined and no default has been
    specified, the port is found by name (or index) and a L{MethodSelector}
    for the port is returned. In all cases, attribute access is
    forwarded to the L{MethodSelector} for either the I{first} port or the
    I{default} port (when specified).
    @ivar __client: A suds client.
    @type __client: L{Client}
    @ivar __ports: A list of I{service} ports.
    @type __ports: list
    @ivar __qn: The I{qualified} name of the port (used for logging).
    @type __qn: str
    """

    def __init__(self, client, ports, qn):
        """
        @param client: A suds client.
        @type client: L{Client}
        @param ports: A list of I{service} ports.
        @type ports: list
        @param qn: The name of the service.
        @type qn: str
        """
        self.__client = client
        self.__ports = ports
        self.__qn = qn

    def __getattr__(self, name):
        """
        Request to access an attribute is forwarded to the
        L{MethodSelector} for either the I{first} port or the
        I{default} port (when specified).
        @param name: The name of a method.
        @type name: str
        @return: A L{MethodSelector}.
        @rtype: L{MethodSelector}.
        """
        default = self.__dp()
        if default is None:
            m = self.__find(0)
        else:
            m = default
        return getattr(m, name)

    def __getitem__(self, name):
        """
        Provides selection of the I{port} by name (string) or
        index (integer). In cases where only (1) port is defined
        or a I{default} has been specified, the request is forwarded
        to the L{MethodSelector}.
        @param name: The name (or index) of a port.
        @type name: (int|str)
        @return: A L{MethodSelector} for the specified port.
        @rtype: L{MethodSelector}.
        """
        default = self.__dp()
        if default is None:
            return self.__find(name)
        else:
            return default

    def __find(self, name):
        """
        Find a I{port} by name (string) or index (integer).
        @param name: The name (or index) of a port.
        @type name: (int|str)
        @return: A L{MethodSelector} for the found port.
        @rtype: L{MethodSelector}.
        @raise PortNotFound: when no port matches I{name}.
        """
        port = None
        if not len(self.__ports):
            raise Exception, 'No ports defined: %s' % self.__qn
        if isinstance(name, int):
            qn = '%s[%d]' % (self.__qn, name)
            try:
                port = self.__ports[name]
            except IndexError:
                raise PortNotFound, qn
        else:
            qn = '.'.join((self.__qn, name))
            for p in self.__ports:
                if name == p.name:
                    port = p
                    break
        if port is None:
            raise PortNotFound, qn
        # rebuild the qualified name from the resolved port
        qn = '.'.join((self.__qn, port.name))
        return MethodSelector(self.__client, port.methods, qn)

    def __dp(self):
        """
        Get the I{default} port if defined in the I{options}.
        @return: A L{MethodSelector} for the I{default} port.
        @rtype: L{MethodSelector}.
        """
        dp = self.__client.options.port
        if dp is None:
            return None
        else:
            return self.__find(dp)
class MethodSelector:
    """
    The B{method} selector is used to select a B{method} by name.
    @ivar __client: A suds client.
    @type __client: L{Client}
    @ivar __methods: A dictionary of methods.
    @type __methods: dict
    @ivar __qn: The I{qualified} name of the method (used for logging).
    @type __qn: str
    """

    def __init__(self, client, methods, qn):
        """
        @param client: A suds client.
        @type client: L{Client}
        @param methods: A dictionary of methods.
        @type methods: dict
        @param qn: The I{qualified} name of the port.
        @type qn: str
        """
        self.__client = client
        self.__methods = methods
        self.__qn = qn

    def __getattr__(self, name):
        """
        Get a method by name and return it in an I{execution wrapper}.
        @param name: The name of a method.
        @type name: str
        @return: An I{execution wrapper} for the specified method name.
        @rtype: L{Method}
        """
        # attribute access and subscript access are equivalent here
        return self[name]

    def __getitem__(self, name):
        """
        Get a method by name and return it in an I{execution wrapper}.
        @param name: The name of a method.
        @type name: str
        @return: An I{execution wrapper} for the specified method name.
        @rtype: L{Method}
        @raise MethodNotFound: when the port has no method named I{name}.
        """
        m = self.__methods.get(name)
        if m is None:
            qn = '.'.join((self.__qn, name))
            raise MethodNotFound, qn
        return Method(self.__client, m)
class Method:
    """
    The I{method} (namespace) object.
    @ivar client: A client object.
    @type client: L{Client}
    @ivar method: A I{wsdl} method.
    @type I{wsdl} Method.
    """

    def __init__(self, client, method):
        """
        @param client: A client object.
        @type client: L{Client}
        @param method: A I{raw} method.
        @type I{raw} Method.
        """
        self.client = client
        self.method = method

    def __call__(self, *args, **kwargs):
        """
        Invoke the method.
        """
        clientclass = self.clientclass(kwargs)
        client = clientclass(self.client, self.method)
        if not self.faults():
            # faults option disabled: return (500, fault) instead of raising
            try:
                return client.invoke(args, kwargs)
            except WebFault, e:
                return (500, e)
        else:
            return client.invoke(args, kwargs)

    def faults(self):
        """ get faults option """
        return self.client.options.faults

    def clientclass(self, kwargs):
        """ get soap client class (simulation kwargs select the SimClient) """
        if SimClient.simulation(kwargs):
            return SimClient
        else:
            return SoapClient
class SoapClient:
    """
    A lightweight soap based web client B{**not intended for external use}
    @ivar service: The target method.
    @type service: L{Service}
    @ivar method: A target method.
    @type method: L{Method}
    @ivar options: A dictionary of options.
    @type options: dict
    @ivar cookiejar: A cookie jar.
    @type cookiejar: libcookie.CookieJar
    """

    def __init__(self, client, method):
        """
        @param client: A suds client.
        @type client: L{Client}
        @param method: A target method.
        @type method: L{Method}
        """
        self.client = client
        self.method = method
        self.options = client.options
        self.cookiejar = CookieJar()

    def invoke(self, args, kwargs):
        """
        Send the required soap message to invoke the specified method
        @param args: A list of args for the method invoked.
        @type args: list
        @param kwargs: Named (keyword) args for the method invoked.
        @type kwargs: dict
        @return: The result of the method invocation.
        @rtype: I{builtin}|I{subclass of} L{Object}
        """
        timer = metrics.Timer()
        timer.start()
        binding = self.method.binding.input
        soapenv = binding.get_message(self.method, args, kwargs)
        timer.stop()
        metrics.log.debug(
            "message for '%s' created: %s",
            self.method.name,
            timer)
        timer.start()
        result = self.send(soapenv)
        timer.stop()
        metrics.log.debug(
            "method '%s' invoked: %s",
            self.method.name,
            timer)
        return result

    def send(self, soapenv):
        """
        Send soap message.
        @param soapenv: A soap envelope to send.
        @type soapenv: L{Document}
        @return: The reply to the sent message.
        @rtype: I{builtin} or I{subclass of} L{Object}
        """
        result = None
        location = self.location()
        binding = self.method.binding.input
        transport = self.options.transport
        retxml = self.options.retxml
        prettyxml = self.options.prettyxml
        log.debug('sending to (%s)\nmessage:\n%s', location, soapenv)
        try:
            # Record the (unserialized) envelope for later retrieval.
            self.last_sent(soapenv)
            plugins = PluginContainer(self.options.plugins)
            plugins.message.marshalled(envelope=soapenv.root())
            if prettyxml:
                soapenv = soapenv.str()
            else:
                soapenv = soapenv.plain()
            soapenv = soapenv.encode('utf-8')
            plugins.message.sending(envelope=soapenv)
            request = Request(location, soapenv)
            request.headers = self.headers()
            reply = transport.send(request)
            # Plugins may rewrite the raw reply before it is processed.
            ctx = plugins.message.received(reply=reply.message)
            reply.message = ctx.reply
            if retxml:
                result = reply.message
            else:
                result = self.succeeded(binding, reply.message)
        # 'as' form is compatible with both Python 2.6+ and 3;
        # 'except TransportError, e' is Python 2 only.
        except TransportError as e:
            if e.httpcode in (202, 204):
                # Accepted / No Content replies are not errors.
                result = None
            else:
                log.error(self.last_sent())
                result = self.failed(binding, e)
        return result

    def headers(self):
        """
        Get http headers for the http/https request.
        @return: A dictionary of header/values.
        @rtype: dict
        """
        action = self.method.soap.action
        stock = { 'Content-Type' : 'text/xml; charset=utf-8', 'SOAPAction': action }
        # User-supplied headers override the stock values.
        result = dict(stock, **self.options.headers)
        log.debug('headers = %s', result)
        return result

    def succeeded(self, binding, reply):
        """
        Request succeeded, process the reply
        @param binding: The binding to be used to process the reply.
        @type binding: L{bindings.binding.Binding}
        @param reply: The raw reply text.
        @type reply: str
        @return: The method result.
        @rtype: I{builtin}, L{Object}
        @raise WebFault: On server.
        """
        log.debug('http succeeded:\n%s', reply)
        plugins = PluginContainer(self.options.plugins)
        if len(reply) > 0:
            reply, result = binding.get_reply(self.method, reply)
            self.last_received(reply)
        else:
            # An empty body is a legitimate (void) reply.
            result = None
        ctx = plugins.message.unmarshalled(reply=result)
        result = ctx.reply
        if self.options.faults:
            return result
        else:
            return (200, result)

    def failed(self, binding, error):
        """
        Request failed, process reply based on reason
        @param binding: The binding to be used to process the reply.
        @type binding: L{suds.bindings.binding.Binding}
        @param error: The http error message
        @type error: L{transport.TransportError}
        """
        status, reason = (error.httpcode, tostr(error))
        reply = error.fp.read()
        log.debug('http failed:\n%s', reply)
        if status == 500:
            # HTTP 500 may carry a soap fault in the body.
            if len(reply) > 0:
                r, p = binding.get_fault(reply)
                self.last_received(r)
                return (status, p)
            else:
                return (status, None)
        if self.options.faults:
            raise Exception((status, reason))
        else:
            return (status, None)

    def location(self):
        """ Get the service location, honoring a user override if set. """
        p = Unskin(self.options)
        return p.get('location', self.method.location)

    def last_sent(self, d=None):
        """ Get (d=None) or set the last sent message on the client. """
        key = 'tx'
        messages = self.client.messages
        if d is None:
            return messages.get(key)
        else:
            messages[key] = d

    def last_received(self, d=None):
        """ Get (d=None) or set the last received message on the client. """
        key = 'rx'
        messages = self.client.messages
        if d is None:
            return messages.get(key)
        else:
            messages[key] = d
class SimClient(SoapClient):
    """
    Loopback client used for message/reply simulation.
    """

    # kwargs key whose presence requests a simulated (loopback) call.
    injkey = '__inject'

    @classmethod
    def simulation(cls, kwargs):
        """ Get whether loopback has been specified in the I{kwargs}. """
        # 'in' replaces dict.has_key(), which was removed in Python 3
        # and deprecated in Python 2.
        return SimClient.injkey in kwargs

    def invoke(self, args, kwargs):
        """
        Send the required soap message to invoke the specified method
        @param args: A list of args for the method invoked.
        @type args: list
        @param kwargs: Named (keyword) args for the method invoked.
        @type kwargs: dict
        @return: The result of the method invocation.
        @rtype: I{builtin} or I{subclass of} L{Object}
        """
        simulation = kwargs[self.injkey]
        msg = simulation.get('msg')
        reply = simulation.get('reply')
        fault = simulation.get('fault')
        if msg is None:
            # No injected request: simulate the reply (or fault) directly.
            if reply is not None:
                return self.__reply(reply, args, kwargs)
            if fault is not None:
                return self.__fault(fault)
            raise Exception('(reply|fault) expected when msg=None')
        # Injected request: parse it and run it through the real send path.
        sax = Parser()
        msg = sax.parse(string=msg)
        return self.send(msg)

    def __reply(self, reply, args, kwargs):
        """ Simulate a successful reply. """
        binding = self.method.binding.input
        msg = binding.get_message(self.method, args, kwargs)
        log.debug('inject (simulated) send message:\n%s', msg)
        binding = self.method.binding.output
        return self.succeeded(binding, reply)

    def __fault(self, reply):
        """ Simulate the (fault) reply. """
        binding = self.method.binding.output
        if self.options.faults:
            r, p = binding.get_fault(reply)
            self.last_received(r)
            return (500, p)
        else:
            return (500, None)
| gpl-3.0 |
Lektorium-LLC/edx-platform | lms/djangoapps/commerce/api/v1/views.py | 5 | 3413 | import logging
from django.contrib.auth.models import User
from django.http import Http404
from edx_rest_api_client import exceptions
from edx_rest_framework_extensions.authentication import JwtAuthentication
from rest_framework.authentication import SessionAuthentication
from rest_framework.generics import ListAPIView, RetrieveUpdateAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from rest_framework_oauth.authentication import OAuth2Authentication
from commerce.api.v1.models import Course
from commerce.api.v1.permissions import ApiKeyOrModelPermission, IsAuthenticatedOrActivationOverridden
from commerce.api.v1.serializers import CourseSerializer
from commerce.utils import is_account_activation_requirement_disabled
from course_modes.models import CourseMode
from openedx.core.djangoapps.commerce.utils import ecommerce_api_client
from openedx.core.lib.api.mixins import PutAsCreateMixin
from util.json_request import JsonResponse
log = logging.getLogger(__name__)
class CourseListView(ListAPIView):
    """ List courses and modes. """
    authentication_classes = (JwtAuthentication, OAuth2Authentication, SessionAuthentication,)
    permission_classes = (IsAuthenticated,)
    serializer_class = CourseSerializer
    pagination_class = None

    def get_queryset(self):
        """ Return every known course as a concrete list (no lazy queryset). """
        return [course for course in Course.iterator()]
class CourseRetrieveUpdateView(PutAsCreateMixin, RetrieveUpdateAPIView):
    """ Retrieve, update, or create courses/modes. """
    lookup_field = 'id'
    lookup_url_kwarg = 'course_id'
    model = CourseMode
    authentication_classes = (JwtAuthentication, OAuth2Authentication, SessionAuthentication,)
    permission_classes = (ApiKeyOrModelPermission,)
    serializer_class = CourseSerializer

    # Django Rest Framework v3 requires that we provide a queryset.
    # Note that we're overriding `get_object()` below to return a `Course`
    # rather than a CourseMode, so this isn't really used.
    queryset = CourseMode.objects.all()

    def get_object(self, queryset=None):
        """ Look up the Course named by the URL's course_id; 404 when absent. """
        course = Course.get(self.kwargs.get(self.lookup_url_kwarg))
        if not course:
            raise Http404
        return course

    def pre_save(self, obj):
        """ Intentionally a no-op.

        The default behavior changes the Course.id attribute from a
        CourseKey to a string, which is not desired.
        """
        pass
class OrderView(APIView):
    """ Retrieve order details. """
    authentication_classes = (JwtAuthentication, SessionAuthentication,)
    permission_classes = (IsAuthenticatedOrActivationOverridden,)
    def get(self, request, number):
        """
        HTTP handler.

        Arguments:
            request: The incoming HTTP request.
            number: The order number to look up in the ecommerce service.

        Returns:
            JsonResponse with the order payload, or status 403 when the
            session user cannot be resolved, or 404 when the ecommerce
            service does not know the order.
        """
        # If the account activation requirement is disabled for this installation, override the
        # anonymous user object attached to the request with the actual user object (if it exists)
        if not request.user.is_authenticated() and is_account_activation_requirement_disabled():
            try:
                # NOTE(review): reaches into the private session cache to recover the
                # authenticated user id; fragile across Django versions -- confirm.
                request.user = User.objects.get(id=request.session._session_cache['_auth_user_id'])
            except User.DoesNotExist:
                # No matching user: refuse rather than proceed anonymously.
                return JsonResponse(status=403)
        try:
            # Query the ecommerce service on behalf of the (possibly overridden) user.
            order = ecommerce_api_client(request.user).orders(number).get()
            return JsonResponse(order)
        except exceptions.HttpNotFoundError:
            return JsonResponse(status=404)
| agpl-3.0 |
christabor/flask_extras | tests/test_config.py | 2 | 1670 | """Test configuration utilities."""
from flask import Flask
from flask_extras.filters import config
class TestGetFuncs:
    """All tests for get funcs function."""

    def test_get_module_funcs(self, client):
        """The helper returns a dict of callables."""
        funcs = config._get_funcs(config)
        assert isinstance(funcs, dict)

    def test_get_module_funcs_notempty(self, client):
        """The returned mapping is not empty."""
        funcs = config._get_funcs(config)
        assert len(funcs.items()) > 0
class TestInjectFilters:
    """All tests for inject filters function."""

    def test_inject_filters_inst(self, client):
        """Injecting an empty mapping still returns the Flask app."""
        app, test = client
        result = config._inject_filters(app, {})
        assert isinstance(result, Flask)

    def test_inject_filters_count(self, client):
        """Injecting a filter grows the jinja filter registry."""
        app, test = client
        before = len(app.jinja_env.filters)
        config._inject_filters(app, {'foo': lambda x: x})
        after = len(app.jinja_env.filters)
        assert after > before
        assert 'foo' in app.jinja_env.filters
class TestConfigFlaskFilters:
    """All tests for config flask filters function."""

    def test_config_filters_inst(self, client):
        """Configuring filters returns the Flask app."""
        app, test = client
        result = config.config_flask_filters(app)
        assert isinstance(result, Flask)

    def test_config_filters_count(self, client):
        """Configuring filters populates an emptied registry."""
        app, test = client
        # Start from a fresh, empty filter registry.
        del app.jinja_env.filters
        app.jinja_env.filters = {}
        before = len(app.jinja_env.filters)
        config.config_flask_filters(app)
        after = len(app.jinja_env.filters)
        assert after > before
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.