repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
mysteryemotionz/v20j-geeb | tools/perf/util/setup.py | 4998 | 1330 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
    """distutils build_ext that redirects output into the kernel build tree."""
    def finalize_options(self):
        _build_ext.finalize_options(self)
        # Override distutils' defaults with the directories the perf
        # Makefile exports (module globals read from the environment below).
        self.build_lib = build_lib
        self.build_temp = build_tmp
class install_lib(_install_lib):
    """distutils install_lib that installs from the redirected build dir."""
    def finalize_options(self):
        _install_lib.finalize_options(self)
        # Install from the same directory build_ext wrote into.
        self.build_dir = build_lib
# Base compiler flags, extended with whatever CFLAGS the kernel build passes.
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()

# Output directories exported by the perf Makefile.
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')

# Source list lives in util/python-ext-sources; skip blanks and '#' comments.
# NOTE: the `file()` builtin is Python 2 only -- this script targets python2.
ext_sources = [f.strip() for f in file('util/python-ext-sources')
               if len(f.strip()) > 0 and f[0] != '#']

perf = Extension('perf',
                 sources = ext_sources,
                 include_dirs = ['util/include'],
                 extra_compile_args = cflags,
                 )

setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
h-mayorquin/camp_india_2016 | tutorials/chemical switches/moose/neuroml/LIF/FvsI_LIF.py | 3 | 2004 | #!/usr/bin/python
# -*- coding: utf-8 -*-
## all SI units
########################################################################################
## Plot the firing rate vs current injection curve for a leaky integrate and fire neuron
## Author: Aditya Gilra
## Creation Date: 2012-06-08
## Modification Date: 2012-06-08
########################################################################################
import os
os.environ['NUMPTHREADS'] = '1'
import sys
sys.path.append('.')
from LIFxml_firing import *

injectmax = 1e-7 # Amperes

# Build the leaky integrate-and-fire cell (helper from LIFxml_firing).
IF1 = create_LIF()
## edge-detect the spikes using spike-gen (table does not have edge detect)
## save spikes in table
table_path = moose.Neutral(IF1.path+'/data').path
IF1spikesTable = moose.Table(table_path+'/spikesTable')
IF1soma = moose.element(IF1.path+'/soma_0')
moose.connect(IF1soma,'spikeOut',IF1spikesTable,'input')
## from moose_utils.py sets clocks and resets/reinits
## simmethod='hsolve' doesn't work -- check resetSim
resetSim(['/cells'], SIMDT, PLOTDT, simmethod='ee')

## Loop through different current injections
freqList = []
currentvec = arange(0.4e-12, injectmax, injectmax/30.0)
### log scale for x-axis
#dlogI = log(2.5)
#logcurrentvec = arange(log(injectmax)-30*dlogI,log(injectmax),dlogI)
#currentvec = [0.0]
#currentvec.extend( [exp(I) for I in logcurrentvec] )
for currenti in currentvec:
    moose.reinit()
    IF1soma.inject = currenti
    moose.start(RUNTIME)
    spikesList = IF1spikesTable.vector
    if len(spikesList)>0:
        # Entries <= 0 in the table are padding, not real spike times.
        spikesList = spikesList[where(spikesList>0.0)[0]]
        spikesNow = len(spikesList)
    else: spikesNow = 0.0
    # NOTE: Python 2 print statement -- this script targets python2.
    print "For injected current =",currenti,\
        "number of spikes in",RUNTIME,"seconds =",spikesNow
    # Firing rate = spike count / simulated time.
    freqList.append( spikesNow/float(RUNTIME) )

## plot the F vs I curve of the neuron
figure(facecolor='w')
plot(currentvec, freqList,'o-',linewidth=2)
xlabel('current (A)',fontsize=24)
ylabel('frequency (Hz)',fontsize=24)
title('Leaky Integrate and Fire',fontsize=24)
show()
| mit |
vhaupert/mitmproxy | pathod/language/generators.py | 7 | 2469 | import os
import string
import random
import mmap
import sys
# Named byte charsets that generator specs can reference. Each value is a
# bytes object; 'ascii' is all printable characters, 'bytes' the full 0-255
# range.
DATATYPES = dict(
    ascii_letters=string.ascii_letters.encode(),
    ascii_lowercase=string.ascii_lowercase.encode(),
    ascii_uppercase=string.ascii_uppercase.encode(),
    digits=string.digits.encode(),
    hexdigits=string.hexdigits.encode(),
    octdigits=string.octdigits.encode(),
    punctuation=string.punctuation.encode(),
    whitespace=string.whitespace.encode(),
    ascii=string.printable.encode(),
    bytes=bytes(range(256))
)
class TransformGenerator:
    """
    Wrap a generator and apply a byte-for-byte transform to its output:
    every input byte must map to exactly one output byte.

    gen: the generator being wrapped
    transform: a callable (offset, data) -> transformed data
    """

    def __init__(self, gen, transform):
        self.gen = gen
        self.transform = transform

    def __len__(self):
        return self.gen.__len__()

    def __getitem__(self, key):
        # Fetch the raw data, then hand it to the transform along with the
        # offset it came from (slice start, or the index itself).
        chunk = self.gen[key]
        offset = key.start if isinstance(key, slice) else key
        return self.transform(offset, chunk)

    def __repr__(self):
        return "'transform(%s)'" % self.gen
def rand_byte(chars):
    """
    Pick one random byte from the charset *chars* and return it as a
    length-1 bytes object.
    """
    # random.choice on a bytes charset yields an int; re-wrap it as bytes.
    return bytes((random.choice(chars),))
class RandomGenerator:
    """Lazily produce random bytes drawn from a named DATATYPES charset."""

    def __init__(self, dtype, length):
        self.dtype = dtype
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, key):
        charset = DATATYPES[self.dtype]
        if not isinstance(key, slice):
            return rand_byte(charset)
        # Clamp the slice to our declared length (sys.maxsize guards
        # against open-ended slices) and emit one random byte per index.
        span = key.indices(min(self.length, sys.maxsize))
        return b"".join(rand_byte(charset) for _ in range(*span))

    def __repr__(self):
        return "%s random from %s" % (self.length, self.dtype)
class FileGenerator:
    """Serve bytes from a file on disk; slices are read via a zero-copy mmap."""

    def __init__(self, path):
        self.path = os.path.expanduser(path)

    def __len__(self):
        return os.path.getsize(self.path)

    def __getitem__(self, key):
        with open(self.path, mode="rb") as fh:
            if not isinstance(key, slice):
                # Single index: read exactly one byte at that offset.
                fh.seek(key)
                return fh.read(1)
            with mmap.mmap(fh.fileno(), 0, access=mmap.ACCESS_READ) as mapped:
                return mapped.__getitem__(key)

    def __repr__(self):
        return "<%s" % self.path
| mit |
RockySteveJobs/python-for-android | python-modules/twisted/twisted/manhole/telnet.py | 81 | 3504 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""Telnet-based shell."""
# twisted imports
from twisted.protocols import telnet
from twisted.internet import protocol
from twisted.python import log, failure
# system imports
import string, copy, sys
from cStringIO import StringIO
class Shell(telnet.Telnet):
    """A Python command-line shell."""

    def connectionMade(self):
        telnet.Telnet.connectionMade(self)
        # Pending lines of an in-progress multi-line statement; flushed by
        # a blank line (see telnet_Command).
        self.lineBuffer = []

    def loggedIn(self):
        # Show the primary prompt once authentication succeeds.
        self.transport.write(">>> ")

    def checkUserAndPass(self, username, password):
        # Single username/password pair configured on the factory.
        return ((self.factory.username == username) and (password == self.factory.password))

    def write(self, data):
        """Write some data to the transport.
        """
        # The shell doubles as a file-like object so tracebacks and stdout
        # can be streamed straight to the remote client (see doCommand).
        self.transport.write(data)

    def telnet_Command(self, cmd):
        """Handle one received line; always stays in "Command" mode."""
        if self.lineBuffer:
            if not cmd:
                # Blank line terminates a buffered multi-line block: join
                # and execute the whole thing.
                cmd = string.join(self.lineBuffer, '\n') + '\n\n\n'
                self.doCommand(cmd)
                self.lineBuffer = []
                return "Command"
            else:
                self.lineBuffer.append(cmd)
                self.transport.write("... ")
                return "Command"
        else:
            self.doCommand(cmd)
            return "Command"

    def doCommand(self, cmd):
        """Compile and run *cmd* in the factory namespace, capturing stdout."""
        # TODO -- refactor this, Reality.author.Author, and the manhole shell
        #to use common functionality (perhaps a twisted.python.code module?)
        fn = '$telnet$'
        result = None
        try:
            out = sys.stdout
            sys.stdout = self
            try:
                # Try the input as an expression first so its value can be
                # echoed back and bound to '_'.
                code = compile(cmd,fn,'eval')
                result = eval(code, self.factory.namespace)
            except:
                try:
                    # Not an expression: execute it as statements.
                    code = compile(cmd, fn, 'exec')
                    exec code in self.factory.namespace
                except SyntaxError, e:
                    if not self.lineBuffer and str(e)[:14] == "unexpected EOF":
                        # Incomplete input: start buffering a multi-line block.
                        self.lineBuffer.append(cmd)
                        self.transport.write("... ")
                        return
                    else:
                        failure.Failure().printTraceback(file=self)
                        log.deferr()
                        self.write('\r\n>>> ')
                        return
                except:
                    # Runtime error: report the traceback to the client.
                    io = StringIO()
                    failure.Failure().printTraceback(file=self)
                    log.deferr()
                    self.write('\r\n>>> ')
                    return
        finally:
            # Always restore the real stdout.
            sys.stdout = out
        self.factory.namespace['_'] = result
        if result is not None:
            self.transport.write(repr(result))
            self.transport.write('\r\n')
        self.transport.write(">>> ")
class ShellFactory(protocol.Factory):
    """Factory producing Shell protocols sharing one evaluation namespace."""

    # Default credentials -- override before listening in production.
    username = "admin"
    password = "admin"
    protocol = Shell
    service = None

    def __init__(self):
        # Namespace shared by every shell connection this factory accepts.
        self.namespace = {
            'factory': self,
            'service': None,
            '_': None
        }

    def setService(self, service):
        self.namespace['service'] = self.service = service

    def __getstate__(self):
        """This returns the persistent state of this shell factory.
        """
        # NOTE(review): this mutates the live __dict__ (only 'namespace'
        # is copied), rather than operating on a copy -- verify intended.
        dict = self.__dict__
        ns = copy.copy(dict['namespace'])
        dict['namespace'] = ns
        # __builtins__ is recreated on demand and should not be pickled.
        if ns.has_key('__builtins__'):
            del ns['__builtins__']
        return dict
| apache-2.0 |
leiferikb/bitpop | depot_tools/third_party/logilab/common/daemon.py | 15 | 3335 | # copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""A daemonize function (for Unices)"""
__docformat__ = "restructuredtext en"
import os
import errno
import signal
import sys
import time
import warnings
def setugid(user):
    """Change process user and group ID

    Argument is a numeric user id or a user name"""
    try:
        from pwd import getpwuid
        passwd = getpwuid(int(user))
    except ValueError:
        # Not numeric -- look the argument up as a user name instead.
        from pwd import getpwnam
        passwd = getpwnam(user)

    if hasattr(os, 'initgroups'): # python >= 2.7
        os.initgroups(passwd.pw_name, passwd.pw_gid)
    else:
        # Older Pythons: call the C library's initgroups(3) directly.
        import ctypes
        if ctypes.CDLL(None).initgroups(passwd.pw_name, passwd.pw_gid) < 0:
            err = ctypes.c_int.in_dll(ctypes.pythonapi,"errno").value
            raise OSError(err, os.strerror(err), 'initgroups')

    # Drop group before user: once the uid is dropped we may no longer
    # have permission to change the gid.
    os.setgid(passwd.pw_gid)
    os.setuid(passwd.pw_uid)
    os.putenv('HOME', passwd.pw_dir)
def daemonize(pidfile=None, uid=None, umask=077):
    """daemonize a Unix process. Set paranoid umask by default.

    Return 1 in the original process, 2 in the first fork, and None for the
    second fork (eg daemon process).
    """
    # Classic double-fork recipe:
    # http://www.faqs.org/faqs/unix-faq/programmer/faq/
    #
    # fork so the parent can exit
    if os.fork():   # launch child and...
        return 1
    # disconnect from tty and create a new session
    os.setsid()
    # fork again so the parent, (the session group leader), can exit.
    # as a non-session group leader, we can never regain a controlling
    # terminal.
    if os.fork():   # launch child again.
        return 2
    # move to the root to avoid mount-point problems
    os.chdir('/')
    # set umask if specified
    if umask is not None:
        os.umask(umask)
    # redirect standard descriptors (stdin/stdout/stderr) to /dev/null
    null = os.open('/dev/null', os.O_RDWR)
    for i in range(3):
        try:
            os.dup2(null, i)
        except OSError, e:
            # EBADF just means the descriptor was already closed.
            if e.errno != errno.EBADF:
                raise
    os.close(null)
    # filter warnings
    warnings.filterwarnings('ignore')
    # write pid in a file
    if pidfile:
        # ensure the directory where the pid-file should be set exists (for
        # instance /var/run/cubicweb may be deleted on computer restart)
        piddir = os.path.dirname(pidfile)
        if not os.path.exists(piddir):
            os.makedirs(piddir)
        f = file(pidfile, 'w')
        f.write(str(os.getpid()))
        f.close()
        os.chmod(pidfile, 0644)
    # change process uid
    if uid:
        setugid(uid)
    return None
| gpl-3.0 |
tdubuffet/odysseus | vendor/doctrine/orm/docs/en/_exts/configurationblock.py | 2577 | 3506 | #Copyright (c) 2010 Fabien Potencier
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
from docutils.parsers.rst import Directive, directives
from docutils import nodes
from string import upper
class configurationblock(nodes.General, nodes.Element):
    # Custom docutils node; rendering is done by the registered
    # visit/depart handlers, so no behaviour is needed here.
    pass
class ConfigurationBlock(Directive):
    """reST directive that groups the same configuration in several formats.

    Each literal block inside the directive body is labelled with a
    human-readable language name and collected into a bullet list wrapped
    in a ``configurationblock`` node.
    """
    has_content = True
    required_arguments = 0
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {}
    # Maps highlight-language ids to the display label shown above each block.
    formats = {
        'html':            'HTML',
        'xml':             'XML',
        'php':             'PHP',
        'yaml':            'YAML',
        'jinja':           'Twig',
        'html+jinja':      'Twig',
        'jinja+html':      'Twig',
        'php+html':        'PHP',
        'html+php':        'PHP',
        'ini':             'INI',
        'php-annotations': 'Annotations',
    }

    def run(self):
        env = self.state.document.settings.env

        # Parse the directive body into a throwaway node tree.
        node = nodes.Element()
        node.document = self.state.document
        self.state.nested_parse(self.content, self.content_offset, node)

        entries = []
        for i, child in enumerate(node):
            if isinstance(child, nodes.literal_block):
                # add a title (the language name) before each block
                #targetid = "configuration-block-%d" % env.new_serialno('configuration-block')
                #targetnode = nodes.target('', '', ids=[targetid])
                #targetnode.append(child)

                innernode = nodes.emphasis(self.formats[child['language']], self.formats[child['language']])

                para = nodes.paragraph()
                para += [innernode, child]

                entry = nodes.list_item('')
                entry.append(para)
                entries.append(entry)

        resultnode = configurationblock()
        resultnode.append(nodes.bullet_list('', *entries))

        return [resultnode]
def visit_configurationblock_html(self, node):
    # HTML output: open the wrapper div around the configuration block.
    self.body.append(self.starttag(node, 'div', CLASS='configuration-block'))

def depart_configurationblock_html(self, node):
    self.body.append('</div>\n')

def visit_configurationblock_latex(self, node):
    # LaTeX output: the node needs no special rendering.
    pass

def depart_configurationblock_latex(self, node):
    pass
def setup(app):
    """Sphinx extension entry point: register the node, its renderers
    and the ``configuration-block`` directive."""
    app.add_node(configurationblock,
                 html=(visit_configurationblock_html, depart_configurationblock_html),
                 latex=(visit_configurationblock_latex, depart_configurationblock_latex))
    app.add_directive('configuration-block', ConfigurationBlock)
| gpl-2.0 |
alexcrichton/gyp | pylib/gyp/generator/gypsh.py | 2779 | 1665 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypsh output module
gypsh is a GYP shell. It's not really a generator per se. All it does is
fire up an interactive Python session with a few local variables set to the
variables passed to the generator. Like gypd, it's intended as a debugging
aid, to facilitate the exploration of .gyp structures after being processed
by the input module.
The expected usage is "gyp -f gypsh -D OS=desired_os".
"""
import code
import sys
# All of this stuff about generator variables was lovingly ripped from gypd.py.
# That module has a much better description of what's going on and why.
_generator_identity_variables = [
  'EXECUTABLE_PREFIX',
  'EXECUTABLE_SUFFIX',
  'INTERMEDIATE_DIR',
  'PRODUCT_DIR',
  'RULE_INPUT_ROOT',
  'RULE_INPUT_DIRNAME',
  'RULE_INPUT_EXT',
  'RULE_INPUT_NAME',
  'RULE_INPUT_PATH',
  'SHARED_INTERMEDIATE_DIR',
]

# gypsh expands no generator variables itself: map each one back onto its
# own '<(NAME)' form so expansion is an identity transform.
generator_default_variables = {
}
for v in _generator_identity_variables:
  generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
  """Drop into an interactive Python session with the gyp data in scope."""
  locals = {
        'target_list': target_list,
        'target_dicts': target_dicts,
        'data': data,
      }

  # Use a banner that looks like the stock Python one and like what
  # code.interact uses by default, but tack on something to indicate what
  # locals are available, and identify gypsh.
  banner='Python %s on %s\nlocals.keys() = %s\ngypsh' % \
         (sys.version, sys.platform, repr(sorted(locals.keys())))

  code.interact(banner, local=locals)
| bsd-3-clause |
TeamJB/android_kernel_lge_hammerhead | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";

for_comm = None
default_interval = 3
interval = default_interval

# Parse the optional [comm] and [interval] command-line arguments.
if len(sys.argv) > 3:
	sys.exit(usage)

if len(sys.argv) > 2:
	for_comm = sys.argv[1]
	interval = int(sys.argv[2])
elif len(sys.argv) > 1:
	try:
		interval = int(sys.argv[1])
	except ValueError:
		# A single non-numeric argument is a comm name, not an interval.
		for_comm = sys.argv[1]
		interval = default_interval

# Per-syscall-id hit counters (autodict comes from the perf Core helpers).
syscalls = autodict()
def trace_begin():
	# Called by perf at trace start: spawn the periodic display thread.
	thread.start_new_thread(print_syscall_totals, (interval,))
	pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	id, args):
	# Per-event hook: count one entry for this syscall id, optionally
	# restricted to the requested comm.
	if for_comm is not None:
		if common_comm != for_comm:
			return
	try:
		syscalls[id] += 1
	except TypeError:
		# First hit for this id: the autodict slot is not a number yet.
		syscalls[id] = 1
def print_syscall_totals(interval):
	# Loop forever, redrawing system-wide syscall totals every `interval`
	# seconds; counters are reset after each refresh.
	while 1:
		clear_term()
		if for_comm is not None:
			print "\nsyscall events for %s:\n\n" % (for_comm),
		else:
			print "\nsyscall events:\n\n",

		print "%-40s  %10s\n" % ("event", "count"),
		print "%-40s  %10s\n" % ("----------------------------------------", \
					 "----------"),

		# Sort by hit count, descending.
		for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
					      reverse = True):
			try:
				print "%-40s  %10d\n" % (syscall_name(id), val),
			except TypeError:
				pass
		# Start each refresh window with fresh counters.
		syscalls.clear()
		time.sleep(interval)
| gpl-2.0 |
cparawhore/ProyectoSubastas | site-packages/django/contrib/gis/tests/relatedapp/models.py | 75 | 1786 | from django.contrib.gis.db import models
from django.utils.encoding import python_2_unicode_compatible
class SimpleModel(models.Model):
    # Abstract base: every concrete model below shares the GeoManager so
    # geographic lookups are available on the default manager.
    objects = models.GeoManager()

    class Meta:
        abstract = True
        app_label = 'relatedapp'
@python_2_unicode_compatible
class Location(SimpleModel):
    # A bare geographic point; displayed as its WKT string.
    point = models.PointField()

    def __str__(self):
        return self.point.wkt
@python_2_unicode_compatible
class City(SimpleModel):
    name = models.CharField(max_length=50)
    # Two-letter state abbreviation.
    state = models.CharField(max_length=2)
    location = models.ForeignKey(Location)

    def __str__(self):
        return self.name
class AugmentedLocation(Location):
    # Subclasses the concrete Location model and adds free-form text.
    extra_text = models.TextField(blank=True)
    objects = models.GeoManager()

    class Meta:
        app_label = 'relatedapp'
class DirectoryEntry(SimpleModel):
    # Listing tied to an AugmentedLocation rather than a plain Location.
    listing_text = models.CharField(max_length=50)
    location = models.ForeignKey(AugmentedLocation)
@python_2_unicode_compatible
class Parcel(SimpleModel):
    name = models.CharField(max_length=30)
    city = models.ForeignKey(City)
    center1 = models.PointField()
    # Throwing a curveball w/`db_column` here.
    # srid=2276 differs from the default SRID -- presumably to exercise
    # cross-SRID queries; verify against the test suite.
    center2 = models.PointField(srid=2276, db_column='mycenter')
    border1 = models.PolygonField()
    border2 = models.PolygonField(srid=2276)

    def __str__(self):
        return self.name
# These use the GeoManager but do not have any geographic fields.
class Author(SimpleModel):
    # Uses the GeoManager but has no geographic fields.
    name = models.CharField(max_length=100)
    dob = models.DateField()
class Article(SimpleModel):
    title = models.CharField(max_length=100)
    # unique=True makes this effectively one-to-one with Author.
    author = models.ForeignKey(Author, unique=True)
class Book(SimpleModel):
    title = models.CharField(max_length=100)
    # Nullable many-to-one; reverse accessor is author.books.
    author = models.ForeignKey(Author, related_name='books', null=True)
| mit |
josherick/bokeh | bokeh/server/services.py | 29 | 2719 | from __future__ import absolute_import
import json
import os
import signal
import subprocess
import sys
class ManagedProcess(object):
    """Wrap a subprocess whose pid is tracked in a shared JSON pidfile.

    On construction, any previously recorded process with the same name is
    sent SIGINT (or, with ``kill_old=False``, an exception is raised), then
    the new process is launched and registered in the pidfile.
    """

    def __init__(self, args, name, pidfilename,
                 stdout=None, stdin=None, stderr=None,
                 kill_old=True):
        self.name = name
        self.pidfilename = pidfilename
        data = self.read_pidfile()
        pid = data.get(name)
        if pid and kill_old:
            try:
                os.kill(pid, signal.SIGINT)
            except OSError:
                #this is ok, just means process is not running
                pass
        elif pid and not kill_old:
            raise Exception("process %s is running on PID %s" % (name, pid))
        try:
            self.proc = subprocess.Popen(args, stdout=stdout, stderr=stderr, stdin=stdin)
        except OSError as error:
            # Re-raise with the full command line for easier diagnosis.
            raise OSError(error.errno, "unable to execute: %s" % " ".join(args))
        self.add_to_pidfile()
        self.closed = False

    def read_pidfile(self):
        """Return the pidfile contents as a dict; empty if the file is absent."""
        if os.path.exists(self.pidfilename):
            with open(self.pidfilename, "r") as f:
                data = json.load(f)
        else:
            data = {}
        return data

    def add_to_pidfile(self):
        """Record this process's pid under its name in the pidfile."""
        data = self.read_pidfile()
        data[self.name] = self.proc.pid
        with open(self.pidfilename, "w+") as f:
            json.dump(data, f)

    def remove_from_pidfile(self):
        """Drop this process's entry from the pidfile, if present."""
        data = self.read_pidfile()
        if self.name in data:
            del data[self.name]
            with open(self.pidfilename, "w+") as f:
                json.dump(data, f)

    def close(self):
        """Kill the child, reap it, and deregister it. Idempotent."""
        if not self.closed:
            self.proc.kill()
            self.proc.communicate()
            self.remove_from_pidfile()
            self.closed = True
def start_redis(pidfilename, port, data_dir, loglevel="warning",
                data_file='redis.db', save=True,
                stdout=sys.stdout, stderr=sys.stderr):
    """Launch a redis-server as a ManagedProcess, feeding config via stdin.

    The bundled redis.conf template is filled in with the given port, data
    directory/file, log level and (optional) persistence setting.
    """
    base_config = os.path.join(os.path.dirname(__file__), 'redis.conf')
    with open(base_config) as f:
        redisconf = f.read()
    savestr = ''
    if save: savestr = 'save 10 1'
    redisconf = redisconf % {'port' : port,
                             'dbdir' : data_dir,
                             'dbfile' : data_file,
                             'loglevel' : loglevel,
                             'save' : savestr}
    # The '-' argument makes redis-server read its config from stdin.
    mproc = ManagedProcess(['redis-server', '-'], 'redis', pidfilename,
                           stdout=stdout,
                           stderr=stderr,
                           stdin=subprocess.PIPE
                           )
    mproc.proc.stdin.write(redisconf.encode())
    mproc.proc.stdin.close()
    return mproc
| bsd-3-clause |
fbradyirl/home-assistant | homeassistant/components/mochad/light.py | 1 | 4739 | """Support for X10 dimmer over Mochad."""
import logging
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
SUPPORT_BRIGHTNESS,
Light,
PLATFORM_SCHEMA,
)
from homeassistant.components import mochad
from homeassistant.const import CONF_NAME, CONF_PLATFORM, CONF_DEVICES, CONF_ADDRESS
from homeassistant.helpers import config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_BRIGHTNESS_LEVELS = "brightness_levels"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_PLATFORM): mochad.DOMAIN,
CONF_DEVICES: [
{
vol.Optional(CONF_NAME): cv.string,
vol.Required(CONF_ADDRESS): cv.x10_address,
vol.Optional(mochad.CONF_COMM_TYPE): cv.string,
vol.Optional(CONF_BRIGHTNESS_LEVELS, default=32): vol.All(
vol.Coerce(int), vol.In([32, 64, 256])
),
}
],
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up X10 dimmers over a mochad controller."""
    devs = config.get(CONF_DEVICES)
    # One MochadLight entity per configured device, all sharing the
    # global mochad controller connection.
    add_entities([MochadLight(hass, mochad.CONTROLLER.ctrl, dev) for dev in devs])
    return True
class MochadLight(Light):
    """Representation of a X10 dimmer over Mochad."""

    def __init__(self, hass, ctrl, dev):
        """Initialize a Mochad Light Device."""
        from pymochad import device

        self._controller = ctrl
        self._address = dev[CONF_ADDRESS]
        self._name = dev.get(CONF_NAME, "x10_light_dev_{}".format(self._address))
        # Communication type defaults to powerline ("pl").
        self._comm_type = dev.get(mochad.CONF_COMM_TYPE, "pl")
        self.light = device.Device(ctrl, self._address, comm_type=self._comm_type)
        self._brightness = 0
        self._state = self._get_device_status()
        # Highest dim step the device supports, zero-based (31, 63 or 255).
        self._brightness_levels = dev.get(CONF_BRIGHTNESS_LEVELS) - 1

    @property
    def brightness(self):
        """Return the brightness of this light between 0..255."""
        return self._brightness

    def _get_device_status(self):
        """Get the status of the light from mochad."""
        with mochad.REQ_LOCK:
            status = self.light.get_status().rstrip()
        return status == "on"

    @property
    def name(self):
        """Return the display name of this light."""
        return self._name

    @property
    def is_on(self):
        """Return true if the light is on."""
        return self._state

    @property
    def supported_features(self):
        """Return supported features."""
        return SUPPORT_BRIGHTNESS

    @property
    def assumed_state(self):
        """X10 devices are normally 1-way so we have to assume the state."""
        return True

    def _calculate_brightness_value(self, value):
        # Scale a 0..255 Home Assistant brightness onto the device's steps.
        return int(value * (float(self._brightness_levels) / 255.0))

    def _adjust_brightness(self, brightness):
        # Issue relative dim/bright commands to move from the cached
        # brightness to the requested one.
        if self._brightness > brightness:
            bdelta = self._brightness - brightness
            mochad_brightness = self._calculate_brightness_value(bdelta)
            self.light.send_cmd("dim {}".format(mochad_brightness))
            self._controller.read_data()
        elif self._brightness < brightness:
            bdelta = brightness - self._brightness
            mochad_brightness = self._calculate_brightness_value(bdelta)
            self.light.send_cmd("bright {}".format(mochad_brightness))
            self._controller.read_data()

    def turn_on(self, **kwargs):
        """Send the command to turn the light on."""
        brightness = kwargs.get(ATTR_BRIGHTNESS, 255)
        with mochad.REQ_LOCK:
            if self._brightness_levels > 32:
                # Extended-dim capable device: set the absolute level directly.
                out_brightness = self._calculate_brightness_value(brightness)
                self.light.send_cmd("xdim {}".format(out_brightness))
                self._controller.read_data()
            else:
                self.light.send_cmd("on")
                self._controller.read_data()
                # There is no persistence for X10 modules so a fresh on command
                # will be full brightness
                if self._brightness == 0:
                    self._brightness = 255
                self._adjust_brightness(brightness)
        self._brightness = brightness
        self._state = True

    def turn_off(self, **kwargs):
        """Send the command to turn the light off."""
        with mochad.REQ_LOCK:
            self.light.send_cmd("off")
            self._controller.read_data()
            # There is no persistence for X10 modules so we need to prepare
            # to track a fresh on command will full brightness
            if self._brightness_levels == 31:
                self._brightness = 0
        self._state = False
| apache-2.0 |
NickWoodhams/flask-admin | flask_admin/tests/sqla/test_multi_pk.py | 46 | 5501 | from nose.tools import eq_, ok_
from . import setup
from .test_basic import CustomModelView
from flask_sqlalchemy import Model
from sqlalchemy.ext.declarative import declarative_base
def test_multiple_pk():
    """Composite PK mixing int and string: CRUD views must round-trip it."""
    # Test multiple primary keys - mix int and string together
    app, db, admin = setup()

    class Model(db.Model):
        id = db.Column(db.Integer, primary_key=True)
        id2 = db.Column(db.String(20), primary_key=True)
        test = db.Column(db.String)

    db.create_all()

    view = CustomModelView(Model, db.session, form_columns=['id', 'id2', 'test'])
    admin.add_view(view)

    client = app.test_client()

    rv = client.get('/admin/model/')
    eq_(rv.status_code, 200)

    rv = client.post('/admin/model/new/',
                     data=dict(id=1, id2='two', test='test3'))
    eq_(rv.status_code, 302)

    rv = client.get('/admin/model/')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test3' in data)

    rv = client.get('/admin/model/edit/?id=1,two')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test3' in data)

    # Correct order is mandatory -> fail here
    rv = client.get('/admin/model/edit/?id=two,1')
    eq_(rv.status_code, 302)
def test_joined_inheritance():
    """Joined-table inheritance: child's PK is a FK to the parent row."""
    app, db, admin = setup()

    class Parent(db.Model):
        id = db.Column(db.Integer, primary_key=True)
        test = db.Column(db.String)

        discriminator = db.Column('type', db.String(50))
        __mapper_args__ = {'polymorphic_on': discriminator}

    class Child(Parent):
        __tablename__ = 'children'
        __mapper_args__ = {'polymorphic_identity': 'child'}

        id = db.Column(db.ForeignKey(Parent.id), primary_key=True)
        name = db.Column(db.String(100))

    db.create_all()

    view = CustomModelView(Child, db.session, form_columns=['id', 'test', 'name'])
    admin.add_view(view)

    client = app.test_client()

    rv = client.get('/admin/child/')
    eq_(rv.status_code, 200)

    rv = client.post('/admin/child/new/',
                     data=dict(id=1, test='foo', name='bar'))
    eq_(rv.status_code, 302)

    rv = client.get('/admin/child/edit/?id=1')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('foo' in data)
    ok_('bar' in data)
def test_single_table_inheritance():
    """Single-table inheritance: child shares the parent's table."""
    app, db, admin = setup()
    # Custom declarative base so the child adds columns without a new table.
    CustomModel = declarative_base(Model, name='Model')

    class Parent(CustomModel):
        __tablename__ = 'parent'
        id = db.Column(db.Integer, primary_key=True)
        test = db.Column(db.String)

        discriminator = db.Column('type', db.String(50))
        __mapper_args__ = {'polymorphic_on': discriminator}

    class Child(Parent):
        __mapper_args__ = {'polymorphic_identity': 'child'}
        name = db.Column(db.String(100))

    CustomModel.metadata.create_all(db.engine)

    view = CustomModelView(Child, db.session, form_columns=['id', 'test', 'name'])
    admin.add_view(view)

    client = app.test_client()

    rv = client.get('/admin/child/')
    eq_(rv.status_code, 200)

    rv = client.post('/admin/child/new/',
                     data=dict(id=1, test='foo', name='bar'))
    eq_(rv.status_code, 302)

    rv = client.get('/admin/child/edit/?id=1')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('foo' in data)
    ok_('bar' in data)
def test_concrete_table_inheritance():
    """Concrete-table inheritance: child gets its own standalone table."""
    app, db, admin = setup()

    class Parent(db.Model):
        id = db.Column(db.Integer, primary_key=True)
        test = db.Column(db.String)

    class Child(Parent):
        __mapper_args__ = {'concrete': True}
        id = db.Column(db.Integer, primary_key=True)
        name = db.Column(db.String(100))
        test = db.Column(db.String)

    db.create_all()

    view = CustomModelView(Child, db.session, form_columns=['id', 'test', 'name'])
    admin.add_view(view)

    client = app.test_client()

    rv = client.get('/admin/child/')
    eq_(rv.status_code, 200)

    rv = client.post('/admin/child/new/',
                     data=dict(id=1, test='foo', name='bar'))
    eq_(rv.status_code, 302)

    rv = client.get('/admin/child/edit/?id=1')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('foo' in data)
    ok_('bar' in data)
def test_concrete_multipk_inheritance():
    """Concrete inheritance combined with a composite (two-column) PK."""
    app, db, admin = setup()

    class Parent(db.Model):
        id = db.Column(db.Integer, primary_key=True)
        test = db.Column(db.String)

    class Child(Parent):
        __mapper_args__ = {'concrete': True}
        id = db.Column(db.Integer, primary_key=True)
        id2 = db.Column(db.Integer, primary_key=True)
        name = db.Column(db.String(100))
        test = db.Column(db.String)

    db.create_all()

    view = CustomModelView(Child, db.session, form_columns=['id', 'id2', 'test', 'name'])
    admin.add_view(view)

    client = app.test_client()

    rv = client.get('/admin/child/')
    eq_(rv.status_code, 200)

    rv = client.post('/admin/child/new/',
                     data=dict(id=1, id2=2, test='foo', name='bar'))
    eq_(rv.status_code, 302)

    # Composite key is passed comma-separated, in declaration order.
    rv = client.get('/admin/child/edit/?id=1,2')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('foo' in data)
    ok_('bar' in data)
| bsd-3-clause |
keerts/home-assistant | homeassistant/components/envisalink.py | 4 | 7683 | """
Support for Envisalink devices.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/envisalink/
"""
import logging
import time
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.helpers.entity import Entity
from homeassistant.components.discovery import load_platform
REQUIREMENTS = ['pyenvisalink==2.0', 'pydispatcher==2.0.5']
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'envisalink'
EVL_CONTROLLER = None
CONF_EVL_HOST = 'host'
CONF_EVL_PORT = 'port'
CONF_PANEL_TYPE = 'panel_type'
CONF_EVL_VERSION = 'evl_version'
CONF_CODE = 'code'
CONF_USERNAME = 'user_name'
CONF_PASS = 'password'
CONF_EVL_KEEPALIVE = 'keepalive_interval'
CONF_ZONEDUMP_INTERVAL = 'zonedump_interval'
CONF_ZONES = 'zones'
CONF_PARTITIONS = 'partitions'
CONF_ZONENAME = 'name'
CONF_ZONETYPE = 'type'
CONF_PARTITIONNAME = 'name'
CONF_PANIC = 'panic_type'
DEFAULT_PORT = 4025
DEFAULT_EVL_VERSION = 3
DEFAULT_KEEPALIVE = 60
DEFAULT_ZONEDUMP_INTERVAL = 30
DEFAULT_ZONETYPE = 'opening'
DEFAULT_PANIC = 'Police'
SIGNAL_ZONE_UPDATE = 'zones_updated'
SIGNAL_PARTITION_UPDATE = 'partition_updated'
SIGNAL_KEYPAD_UPDATE = 'keypad_updated'
# Per-zone config: a required friendly name plus an optional type
# (defaults to 'opening').
ZONE_SCHEMA = vol.Schema({
    vol.Required(CONF_ZONENAME): cv.string,
    vol.Optional(CONF_ZONETYPE, default=DEFAULT_ZONETYPE): cv.string})

# Per-partition config: just a required friendly name.
PARTITION_SCHEMA = vol.Schema({
    vol.Required(CONF_PARTITIONNAME): cv.string})

# Component configuration: connection details and credentials are
# mandatory; tuning values (EVL version, keepalive, zone-dump interval)
# are bounds-checked.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_EVL_HOST): cv.string,
        vol.Required(CONF_PANEL_TYPE):
            vol.All(cv.string, vol.In(['HONEYWELL', 'DSC'])),
        vol.Required(CONF_USERNAME): cv.string,
        vol.Required(CONF_PASS): cv.string,
        vol.Required(CONF_CODE): cv.string,
        vol.Optional(CONF_PANIC, default=DEFAULT_PANIC): cv.string,
        # Zones/partitions are keyed by their integer number.
        vol.Optional(CONF_ZONES): {vol.Coerce(int): ZONE_SCHEMA},
        vol.Optional(CONF_PARTITIONS): {vol.Coerce(int): PARTITION_SCHEMA},
        vol.Optional(CONF_EVL_PORT, default=DEFAULT_PORT): cv.port,
        vol.Optional(CONF_EVL_VERSION, default=DEFAULT_EVL_VERSION):
            vol.All(vol.Coerce(int), vol.Range(min=3, max=4)),
        vol.Optional(CONF_EVL_KEEPALIVE, default=DEFAULT_KEEPALIVE):
            vol.All(vol.Coerce(int), vol.Range(min=15)),
        vol.Optional(CONF_ZONEDUMP_INTERVAL,
                     default=DEFAULT_ZONEDUMP_INTERVAL):
            vol.All(vol.Coerce(int), vol.Range(min=15)),
    }),
}, extra=vol.ALLOW_EXTRA)
# pylint: disable=unused-argument
def setup(hass, base_config):
    """Common setup for Envisalink devices.

    Reads the ``envisalink`` section of the configuration, creates the
    global EVL_CONTROLLER panel connection, wires the panel's callbacks to
    dispatcher signals, waits (up to ~10s) for the login handshake, and
    finally loads the alarm/sensor/binary_sensor platforms for the
    configured partitions and zones.

    Returns True on success, False when the login fails, the connection
    cannot be established, or the handshake times out.
    """
    from pyenvisalink import EnvisalinkAlarmPanel
    from pydispatch import dispatcher
    global EVL_CONTROLLER
    config = base_config.get(DOMAIN)
    _host = config.get(CONF_EVL_HOST)
    _port = config.get(CONF_EVL_PORT)
    _code = config.get(CONF_CODE)
    _panel_type = config.get(CONF_PANEL_TYPE)
    _panic_type = config.get(CONF_PANIC)
    _version = config.get(CONF_EVL_VERSION)
    _user = config.get(CONF_USERNAME)
    _pass = config.get(CONF_PASS)
    _keep_alive = config.get(CONF_EVL_KEEPALIVE)
    _zone_dump = config.get(CONF_ZONEDUMP_INTERVAL)
    _zones = config.get(CONF_ZONES)
    _partitions = config.get(CONF_PARTITIONS)
    # Shared mutable state between the async callbacks below and the
    # blocking poll loop in start_envisalink().
    _connect_status = {}
    EVL_CONTROLLER = EnvisalinkAlarmPanel(_host,
                                          _port,
                                          _panel_type,
                                          _version,
                                          _user,
                                          _pass,
                                          _zone_dump,
                                          _keep_alive,
                                          hass.loop)
    def login_fail_callback(data):
        """Callback for when the evl rejects our login."""
        _LOGGER.error("The envisalink rejected your credentials.")
        _connect_status['fail'] = 1
    def connection_fail_callback(data):
        """Network failure callback."""
        _LOGGER.error("Could not establish a connection with the envisalink.")
        _connect_status['fail'] = 1
    def connection_success_callback(data):
        """Callback for a successful connection."""
        _LOGGER.info("Established a connection with the envisalink.")
        _connect_status['success'] = 1
    def zones_updated_callback(data):
        """Handle zone timer updates."""
        _LOGGER.info("Envisalink sent a zone update event. Updating zones...")
        dispatcher.send(signal=SIGNAL_ZONE_UPDATE,
                        sender=None,
                        zone=data)
    def alarm_data_updated_callback(data):
        """Handle non-alarm based info updates."""
        _LOGGER.info("Envisalink sent new alarm info. Updating alarms...")
        dispatcher.send(signal=SIGNAL_KEYPAD_UPDATE,
                        sender=None,
                        partition=data)
    def partition_updated_callback(data):
        """Handle partition changes thrown by evl (including alarms)."""
        _LOGGER.info("The envisalink sent a partition update event.")
        dispatcher.send(signal=SIGNAL_PARTITION_UPDATE,
                        sender=None,
                        partition=data)
    def stop_envisalink(event):
        """Shutdown envisalink connection and thread on exit."""
        _LOGGER.info("Shutting down envisalink.")
        EVL_CONTROLLER.stop()
    def start_envisalink(event):
        """Startup process for the Envisalink.

        Schedules EVL_CONTROLLER.start on the event loop, then polls
        _connect_status once a second (blocking) for up to 10 seconds
        until one of the callbacks above reports success or failure.
        """
        hass.loop.call_soon_threadsafe(EVL_CONTROLLER.start)
        for _ in range(10):
            if 'success' in _connect_status:
                hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_envisalink)
                return True
            elif 'fail' in _connect_status:
                return False
            else:
                time.sleep(1)
        _LOGGER.error("Timeout occurred while establishing evl connection.")
        return False
    # Wire the panel's event hooks to the callbacks defined above before
    # starting the connection, so no early event is missed.
    EVL_CONTROLLER.callback_zone_timer_dump = zones_updated_callback
    EVL_CONTROLLER.callback_zone_state_change = zones_updated_callback
    EVL_CONTROLLER.callback_partition_state_change = partition_updated_callback
    EVL_CONTROLLER.callback_keypad_update = alarm_data_updated_callback
    EVL_CONTROLLER.callback_login_failure = login_fail_callback
    EVL_CONTROLLER.callback_login_timeout = connection_fail_callback
    EVL_CONTROLLER.callback_login_success = connection_success_callback
    _result = start_envisalink(None)
    if not _result:
        return False
    # Load sub-components for Envisalink
    if _partitions:
        load_platform(hass, 'alarm_control_panel', 'envisalink',
                      {CONF_PARTITIONS: _partitions,
                       CONF_CODE: _code,
                       CONF_PANIC: _panic_type}, base_config)
        load_platform(hass, 'sensor', 'envisalink',
                      {CONF_PARTITIONS: _partitions,
                       CONF_CODE: _code}, base_config)
    if _zones:
        load_platform(hass, 'binary_sensor', 'envisalink',
                      {CONF_ZONES: _zones}, base_config)
    return True
class EnvisalinkDevice(Entity):
    """Base entity shared by all Envisalink-backed devices.

    Holds the display name, the raw info dict received from the panel, and
    the panel controller object; concrete platform entities derive from it.
    """

    def __init__(self, name, info, controller):
        """Store the name, panel info and controller for subclasses."""
        self._name = name
        self._info = info
        self._controller = controller

    @property
    def name(self):
        """Name shown for this device."""
        return self._name

    @property
    def should_poll(self):
        """Updates arrive via dispatcher signals, so polling is disabled."""
        return False
| apache-2.0 |
JoErNanO/brianmodel | brianmodel/neuron/neuron.py | 1 | 5023 | #!/usr/bin/python
# coding: utf-8
# #################################################################################
# Copyright (C) 2014 Francesco Giovannini, Neurosys - INRIA CR Nancy - Grand Est
# Authors: Francesco Giovannini
# email: francesco.giovannini@inria.fr
# website: http://neurosys.loria.fr/
# Permission is granted to copy, distribute, and/or modify this program
# under the terms of the GNU General Public License, version 3 or any
# later version published by the Free Software Foundation.
#
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details
# #################################################################################
"""
neuron
~~~~~~~~~~~~~
This module contains an abstraction of a Brian-compatible neuron, represented as a cell with a list of :class:`IonicCurrent`'s.
:copyright 2014 Francesco Giovannini, Neurosys - INRIA CR Nancy - Grand Est
:licence GPLv3, see LICENCE for more details
"""
from utilities import utilities as utilities
import ioniccurrent.ioniccurrentfactory as icf
import yaml
## ***************************************************************************************************************** ##
class Neuron(object):
    """
    The :class:`Neuron` represents a biological neuron with a set of properties, and a list of :class:`IonicCurrent`'s flowing through its membrane.
    """
    ## ************************************************************ ##
    def __init__(self, parameters):
        """
        Default constructor.
        :param parameters: single-entry dictionary mapping the neuron name to
            its parameter dictionary.  The parameter dictionary must provide
            ``area``, ``conductance`` and ``currents`` (the latter with
            optional ``defined`` and ``included`` entries; ``included`` lists
            YAML files of additional current definitions).
        :type parameters: dict
        :raises IOError: if an included current parameter file cannot be read
        """
        # Initialise attributes
        # NOTE(review): indexing .values()/.keys() relies on Python 2 dicts
        # returning lists; this is not valid under Python 3.
        self.parameters = parameters.values()[0]
        self.name = parameters.keys()[0]
        self.area = self.parameters['area']
        self.conductance = self.parameters['conductance']
        # Initialise list of defined currents - FG: fixes bug due to having an empty list of defined currents when including all the needed ones
        if 'defined' not in self.parameters['currents']: # Keyword 'defined' doesn't exists
            self.parameters['currents']['defined'] = []
        elif self.parameters['currents']['defined'] is None: # List of 'defined' currents is undefined/empty
            self.parameters['currents']['defined'] = []
        # Check for parameters specified as include files
        for f in self.parameters['currents'].get('included', []):
            try:
                with open(f) as curr:
                    # Load included currents from file
                    includes = yaml.load(curr)
                    # Add them the list of defined currents
                    self.parameters['currents'].get('defined', []).extend(includes)
            except IOError:
                raise IOError('Cannot load current parameter file named ' + f)
        # Remove list of includesd currents from dict of currents
        self.parameters['currents'].pop('included', [])
        # Initialise ionic current factory
        self.factory = icf.IonicCurrentFactory()
        # Build current list
        self.currents = []
        for currentParams in self.parameters['currents'].get('defined', []):
            tmpCurrent = self.factory.makeIonicCurrent(currentParams, self.area)
            self.currents.append(tmpCurrent)
        # Store safe string representation of parameters
        self._area = utilities.getSafeStringParam(self.area)
        # Total membrane conductance scales with the membrane area.
        self._conductance = utilities.getSafeStringParam(utilities.getSafeStringParam(self.conductance) + ' * ' + self._area)
    ## ************************************************************ ##
    ## ************************************************************ ##
    def getNeuronString(self):
        """
        Generate the string representation of the neural cell model.
        :returns: a Brian-style multi-line equation string containing the
            membrane equation (dv/dt), one block per ionic current, and the
            stimulus current declaration (I_stim).
        :rtype: str
        """
        res = ""
        # Neuron model equation
        dvdt = '''dv/dt = ('''
        # Add current equations
        for curr in self.currents:
            dvdt += ''' - ''' + curr.name # Add current name to dvdt equation
            res += curr.getIonicCurrentString() # Add current equation to neuron model
        dvdt += ''' + I_stim) / ''' + self._conductance + ''' : volt \n''' # Append conductance division
        # Check Voltage clamp
        # When voltage-clamped the membrane equation collapses to a plain
        # declared variable, overriding the dv/dt expression built above.
        if self.parameters.has_key('vClamp') and self.parameters['vClamp']:
            dvdt = '''v : volt \n'''
        # Stimulus current
        istim = '''I_stim : amp'''
        # Build final neuron model equation
        res = dvdt + res + istim
        return res
    ## ************************************************************ ##
## ***************************************************************************************************************** ##
| gpl-3.0 |
ronnyandersson/zignal | examples/ex_chunks.py | 1 | 2576 | '''
Created on 12 Apr 2020
@author: Ronny Andersson (ronny@andersson.tk)
@copyright: (c) 2020 Ronny Andersson
@license: MIT
Demo of how to iterate over an instance of the Audio class, for chunk-based
processing. Typically the chunks have a size that is a power of two, for
example 256, 1024 or 4096. In this example the chunk size is set to 1000
for simplicity in the plots. The sample rate in this example is also set to
a value that enhances the effect of the example, since here a chunk equals
one second of data.
'''
# Standard library
import logging
# Third party
import matplotlib.pyplot as plt
import numpy as np
# Internal
import zignal
if __name__ == '__main__':
    logging.basicConfig(
        format='%(levelname)-7s: %(module)s.%(funcName)-15s %(message)s',
        level='DEBUG',
        )
    logging.getLogger("matplotlib").setLevel(logging.INFO)
    logging.getLogger("zignal").setLevel(logging.DEBUG)
    fs = 1000
    # Create various ramp signals, to visualise the chunks better. Not real
    # audio, but shows in a plot what the chunks look like.
    # NOTE: np.linspace requires an integer `num`; under Python 3 the
    # expression 1000/2 is a float and recent numpy versions reject it,
    # so use floor division instead.
    a1 = zignal.Audio(fs=fs, initialdata=np.linspace(0, 1, num=(1000//2)))
    a2 = zignal.Audio(fs=fs, initialdata=np.linspace(0, -1, num=(1000*1)+500))
    a3 = zignal.Audio(fs=fs, initialdata=np.linspace(0, 1, num=(1000*2)+200))
    a = zignal.Audio(fs=fs)
    a.append(a1, a2, a3)
    print(a)
    # We now have 2.2 seconds of audio in three channels. This does not add up
    # to even chunk sizes, so padding will have to be done in order to iterate.
    #
    # Three (3) chunks are expected.
    for val in a.iter_chunks(chunksize=1000):
        print("------------------------------------------------")
        print("shape of data in chunk: %s" % str(val.shape))
        print(val)
        # Plot each channel of the current chunk on the same figure so the
        # per-chunk padding (zeros at the tail) is visible.
        plt.figure(1)
        plt.plot(val[:, 0], ls="-", label="a1")
        plt.plot(val[:, 1], ls="--", label="a2")
        plt.plot(val[:, 2], ls="-.", label="a3")
        plt.grid()
        plt.ylim(-1.1, 1.1)
        plt.xlabel("samples in chunk")
        plt.ylabel("magnitude [lin]")
        plt.legend(loc="upper right")
    plt.show()
    # We can pad beforehand if we know how many samples are missing, then no
    # padding will occur inside the iterator
    b = a.copy()
    b.gain(-20)  # just to get a debug logging entry
    b.pad(nofsamples=800)
    print(b)
    for val in b.iter_chunks(chunksize=1000):
        print("------------------------------------------------")
        print("shape of data in chunk: %s" % str(val.shape))
        print(val)
    print('-- Done --')
| mit |
hnakamur/django-admin2 | example/blog/tests/test_permissions.py | 2 | 11225 | from django.contrib.auth.models import User, Permission
from django.core.urlresolvers import reverse
from django.template import Template, Context
from django.test import TestCase
from django.test.client import RequestFactory
import djadmin2
from djadmin2 import ModelAdmin2
from djadmin2.permissions import TemplatePermissionChecker
from blog.models import Post
class TemplatePermissionTest(TestCase):
    """Tests for TemplatePermissionChecker lookups inside Django templates."""

    def setUp(self):
        """Create a staff user and a request factory shared by all tests."""
        self.factory = RequestFactory()
        self.user = User(
            username='admin',
            is_staff=True)
        self.user.set_password('admin')
        self.user.save()

    def render(self, template, context):
        """Render the given template string with the given context dict."""
        template = Template(template)
        context = Context(context)
        return template.render(context)

    def test_permission_wrapper(self):
        """Attribute access resolves permissions; unknown names render ''."""
        model_admin = ModelAdmin2(Post, djadmin2.default)
        request = self.factory.get(reverse('admin2:blog_post_index'))
        request.user = self.user
        permissions = TemplatePermissionChecker(request, model_admin)
        context = {
            'permissions': permissions,
        }
        result = self.render(
            '{{ permissions.has_unvalid_permission }}',
            context)
        self.assertEqual(result, '')
        result = self.render('{{ permissions.has_add_permission }}', context)
        self.assertEqual(result, 'False')
        post_add_permission = Permission.objects.get(
            content_type__app_label='blog',
            content_type__model='post',
            codename='add_post')
        self.user.user_permissions.add(post_add_permission)
        # invalidate the users permission cache
        if hasattr(self.user, '_perm_cache'):
            del self.user._perm_cache
        result = self.render('{{ permissions.has_add_permission }}', context)
        self.assertEqual(result, 'True')

    def test_admin_traversal_by_name(self):
        """Dotted `app_model` names select another admin's permissions."""
        post_add_permission = Permission.objects.get(
            content_type__app_label='blog',
            content_type__model='post',
            codename='add_post')
        self.user.user_permissions.add(post_add_permission)
        model_admin = ModelAdmin2(Post, djadmin2.default)
        request = self.factory.get(reverse('admin2:blog_post_index'))
        request.user = self.user
        permissions = TemplatePermissionChecker(request, model_admin)
        context = {
            'permissions': permissions,
        }
        result = self.render('{{ permissions.has_add_permission }}', context)
        self.assertEqual(result, 'True')
        result = self.render('{{ permissions.blog_post.has_add_permission }}', context)
        self.assertEqual(result, 'True')
        result = self.render('{{ permissions.blog_post.has_change_permission }}', context)
        self.assertEqual(result, 'False')
        result = self.render('{{ permissions.auth_user.has_delete_permission }}', context)
        self.assertEqual(result, 'False')
        result = self.render(
            '{{ permissions.unknown_app.has_add_permission }}',
            context)
        self.assertEqual(result, '')
        result = self.render(
            '{{ permissions.blog_post.has_unvalid_permission }}',
            context)
        self.assertEqual(result, '')

    def test_admin_binding(self):
        """The `for_admin` filter rebinds the checker to another admin."""
        user_admin = djadmin2.default.get_admin_by_name('auth_user')
        post_admin = djadmin2.default.get_admin_by_name('blog_post')
        request = self.factory.get(reverse('admin2:auth_user_index'))
        request.user = self.user
        permissions = TemplatePermissionChecker(request, user_admin)
        post = Post.objects.create(title='Hello', body='world')
        context = {
            'post': post,
            'post_admin': post_admin,
            'permissions': permissions,
        }
        result = self.render(
            '{% load admin2_tags %}'
            '{{ permissions|for_admin:post_admin }}',
            context)
        self.assertEqual(result, '')
        result = self.render(
            '{% load admin2_tags %}'
            '{{ permissions.has_add_permission }}'
            '{% with permissions|for_admin:post_admin as permissions %}'
            '{{ permissions.has_add_permission }}'
            '{% endwith %}',
            context)
        self.assertEqual(result, 'FalseFalse')
        post_add_permission = Permission.objects.get(
            content_type__app_label='blog',
            content_type__model='post',
            codename='add_post')
        self.user.user_permissions.add(post_add_permission)
        # invalidate the users permission cache
        if hasattr(self.user, '_perm_cache'):
            del self.user._perm_cache
        result = self.render(
            '{% load admin2_tags %}'
            '{{ permissions.has_add_permission }}'
            '{% with permissions|for_admin:post_admin as permissions %}'
            '{{ permissions.has_add_permission }}'
            '{% endwith %}'
            '{{ permissions.blog_post.has_add_permission }}',
            context)
        self.assertEqual(result, 'FalseTrueTrue')
        # giving a string (the name of the admin) also works
        result = self.render(
            '{% load admin2_tags %}'
            '{% with permissions|for_admin:"blog_post" as permissions %}'
            '{{ permissions.has_add_permission }}'
            '{% endwith %}',
            context)
        self.assertEqual(result, 'True')
        # testing invalid admin names
        result = self.render(
            '{% load admin2_tags %}'
            '{% with permissions|for_admin:"invalid_admin_name" as permissions %}'
            '{{ permissions.has_add_permission }}'
            '{% endwith %}',
            context)
        self.assertEqual(result, '')

    def test_view_binding(self):
        """The `for_view` filter checks a named view; chains with for_admin."""
        user_admin = djadmin2.default.get_admin_by_name('auth_user')
        post_admin = djadmin2.default.get_admin_by_name('blog_post')
        request = self.factory.get(reverse('admin2:auth_user_index'))
        request.user = self.user
        permissions = TemplatePermissionChecker(request, user_admin)
        context = {
            'post_admin': post_admin,
            'post_add_view': post_admin.create_view,
            'permissions': permissions,
        }
        result = self.render(
            '{% load admin2_tags %}'
            '{{ permissions|for_view:"add" }}',
            context)
        self.assertEqual(result, 'False')
        # view classes are not supported yet
        result = self.render(
            '{% load admin2_tags %}'
            '{{ permissions|for_view:post_add_view }}',
            context)
        self.assertEqual(result, '')
        result = self.render(
            '{% load admin2_tags %}'
            # user add permission
            '{{ permissions.has_add_permission }}'
            '{% with permissions|for_admin:"blog_post"|for_view:"add" as post_add_perm %}'
            # post add permission
            '{{ post_add_perm }}'
            '{% endwith %}',
            context)
        self.assertEqual(result, 'FalseFalse')
        post_add_permission = Permission.objects.get(
            content_type__app_label='blog',
            content_type__model='post',
            codename='add_post')
        self.user.user_permissions.add(post_add_permission)
        user_change_permission = Permission.objects.get(
            content_type__app_label='auth',
            content_type__model='user',
            codename='change_user')
        self.user.user_permissions.add(user_change_permission)
        # invalidate the users permission cache
        if hasattr(self.user, '_perm_cache'):
            del self.user._perm_cache
        result = self.render(
            '{% load admin2_tags %}'
            # user add permission
            '{{ permissions.has_add_permission }}'
            '{% with permissions|for_admin:"blog_post"|for_view:"add" as post_add_perm %}'
            # post add permission
            '{{ post_add_perm }}'
            '{% endwith %}'
            # user change permission
            '{{ permissions|for_view:"change" }}',
            context)
        self.assertEqual(result, 'FalseTrueTrue')
        # giving a string (the name of the view) also works
        result = self.render(
            '{% load admin2_tags %}'
            '{% with permissions|for_view:"change" as user_change_perm %}'
            '1{{ user_change_perm }}'
            '2{{ user_change_perm|for_view:"add" }}'
            # this shouldn't return True or False but '' since the
            # previously bound change view doesn't belong to the newly
            # bound blog_post admin
            '3{{ user_change_perm|for_admin:"blog_post" }}'
            '4{{ user_change_perm|for_admin:"blog_post"|for_view:"add" }}'
            '{% endwith %}',
            context)
        self.assertEqual(result, '1True2False34True')

    def test_object_level_permission(self):
        """Object binding via `for_object` is accepted but returns False."""
        model_admin = ModelAdmin2(Post, djadmin2.default)
        request = self.factory.get(reverse('admin2:blog_post_index'))
        request.user = self.user
        permissions = TemplatePermissionChecker(request, model_admin)
        post = Post.objects.create(title='Hello', body='world')
        context = {
            'post': post,
            'permissions': permissions,
        }
        result = self.render(
            '{% load admin2_tags %}'
            '{{ permissions.has_unvalid_permission|for_object:post }}',
            context)
        self.assertEqual(result, '')
        result = self.render(
            '{% load admin2_tags %}'
            '{{ permissions.has_add_permission|for_object:post }}',
            context)
        self.assertEqual(result, 'False')
        post_add_permission = Permission.objects.get(
            content_type__app_label='blog',
            content_type__model='post',
            codename='add_post')
        self.user.user_permissions.add(post_add_permission)
        # invalidate the users permission cache
        if hasattr(self.user, '_perm_cache'):
            del self.user._perm_cache
        # object level permission are not supported by default. So this will
        # return ``False``.
        result = self.render(
            '{% load admin2_tags %}'
            '{{ permissions.has_add_permission }}'
            '{{ permissions.has_add_permission|for_object:post }}',
            context)
        self.assertEqual(result, 'TrueFalse')
        # binding an object and then checking for a specific view also works
        result = self.render(
            '{% load admin2_tags %}'
            '{{ permissions.has_add_permission }}'
            '{% with permissions|for_object:post as permissions %}'
            '{{ permissions.has_add_permission }}'
            '{% endwith %}',
            context)
        self.assertEqual(result, 'TrueFalse')
class ViewPermissionTest(TestCase):
    """Tests that the custom `view` permission is created for models."""

    def test_view_permission_was_created(self):
        """Exactly one `view_post` permission should exist for blog.Post."""
        permissions = Permission.objects.filter(
            content_type__app_label='blog',
            content_type__model='post')
        self.assertEqual(len(permissions.filter(codename='view_post')), 1)
| bsd-3-clause |
meredith-digops/ansible | lib/ansible/modules/network/ovs/openvswitch_db.py | 11 | 6161 | #!/usr/bin/python
# coding: utf-8 -*-
# pylint: disable=C0111
#
# (c) 2015, Mark Hamilton <mhamilton@vmware.com>
#
# Portions copyright @ 2015 VMware, Inc.
#
# This file is part of Ansible
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: openvswitch_db
author: "Mark Hamilton (mhamilton@vmware.com)"
version_added: 2.0
short_description: Configure open vswitch database.
requirements: [ "ovs-vsctl >= 2.3.3" ]
description:
- Set column values in record in database table.
options:
state:
required: false
description:
- Configures the state of the key. When set
to I(present), the I(key) and I(value) pair will be set
on the I(record) and when set to I(absent) the I(key)
will not be set.
default: present
choices: ['present', 'absent']
version_added: "2.4"
table:
required: true
description:
- Identifies the table in the database.
record:
required: true
description:
      - Identifies the record in the table.
column:
required: true
description:
- Identifies the column in the record.
key:
required: true
description:
- Identifies the key in the record column
value:
required: true
description:
- Expected value for the table, record, column and key.
timeout:
required: false
default: 5
description:
- How long to wait for ovs-vswitchd to respond
"""
EXAMPLES = '''
# Increase the maximum idle time to 50 seconds before pruning unused kernel
# rules.
- openvswitch_db:
table: open_vswitch
record: .
col: other_config
key: max-idle
value: 50000
# Disable in band copy
- openvswitch_db:
table: Bridge
record: br-int
col: other_config
key: disable-in-band
value: true
# Remove in band key
- openvswitch_db:
state: present
table: Bridge
record: br-int
col: other_config
key: disable-in-band
'''
def map_obj_to_command(want, have, module):
    """Build the ovs-vsctl command needed to move `have` towards `want`.

    Returns the command string, or None when the database already matches
    the desired state and nothing needs to change.
    """
    params = module.params
    if params['state'] == 'absent':
        # Only emit a remove when the key actually exists in the database.
        if 'key' not in have:
            return None
        template = ("%(ovs-vsctl)s -t %(timeout)s remove %(table)s %(record)s "
                    "%(col)s %(key)s=%(value)s")
        return template % params
    # state == 'present': add a missing key, or set it when the value differs.
    if 'key' not in have:
        template = ("%(ovs-vsctl)s -t %(timeout)s add %(table)s %(record)s "
                    "%(col)s %(key)s=%(value)s")
        return template % params
    if want['value'] != have['value']:
        template = ("%(ovs-vsctl)s -t %(timeout)s set %(table)s %(record)s "
                    "%(col)s:%(key)s=%(value)s")
        return template % params
    return None
def map_config_to_obj(module):
    """Read the current state of the requested table/record/column.

    Runs ``ovs-vsctl list`` and parses the requested column, which ovs
    renders as a map like ``{k1=v1, k2=v2}``.  Returns a dict with
    ``table``/``record``/``col``; ``key`` and ``value`` are only included
    when the requested key is present in the column.
    """
    templatized_command = "%(ovs-vsctl)s -t %(timeout)s list %(table)s %(record)s"
    command = templatized_command % module.params
    rc, out, err = module.run_command(command, check_rc=True)
    if rc != 0:
        module.fail_json(msg=err)
    # Escape the column name so regex metacharacters in it cannot corrupt
    # the pattern, and fail cleanly when the column is not in the output
    # (previously this crashed with AttributeError on `match.group`).
    match = re.search(r'^' + re.escape(module.params['col']) + r'(\s+):(\s+)(.*)$',
                      out, re.M)
    if match is None:
        module.fail_json(msg="Column %s not found in output of: %s"
                             % (module.params['col'], command))
    col_value = match.group(3)
    col_value_to_dict = {}
    # Strip the surrounding braces and split into key=value pairs.  An
    # empty map ("{}") yields no pairs (previously ''.split('=') raised
    # ValueError here).
    inner = col_value[1:-1].strip() if col_value else ''
    if inner:
        for kv in inner.split(','):
            k, v = kv.split('=')
            col_value_to_dict[k.strip()] = v.strip()
    obj = {
        'table': module.params['table'],
        'record': module.params['record'],
        'col': module.params['col'],
    }
    if module.params['key'] in col_value_to_dict:
        obj['key'] = module.params['key']
        obj['value'] = col_value_to_dict[module.params['key']]
    return obj
def map_params_to_obj(module):
    """Collect the desired (wanted) state from the module parameters."""
    wanted_keys = ('table', 'record', 'col', 'key', 'value')
    return {name: module.params[name] for name in wanted_keys}
# pylint: disable=E0602
def main():
    """ Entry point for ansible module.

    Reads the module arguments, compares the current database state with
    the desired one, runs the resulting ovs-vsctl command (unless running
    in check mode) and reports whether anything changed.
    """
    argument_spec = {
        'state': {'default': 'present', 'choices': ['present', 'absent']},
        'table': {'required': True},
        'record': {'required': True},
        'col': {'required': True},
        'key': {'required': True},
        'value': {'required': True},
        'timeout': {'default': 5, 'type': 'int'},
    }
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    result = {'changed': False}
    # We add ovs-vsctl to module_params to later build up templatized commands
    module.params["ovs-vsctl"] = module.get_bin_path("ovs-vsctl", True)
    want = map_params_to_obj(module)
    have = map_config_to_obj(module)
    command = map_obj_to_command(want, have, module)
    result['command'] = command
    # A non-None command means the database differs from the desired state.
    if command:
        if not module.check_mode:
            module.run_command(command, check_rc=True)
        result['changed'] = True
    module.exit_json(**result)
# pylint: disable=W0614
# pylint: disable=W0401
# pylint: disable=W0622
# import module snippets
from ansible.module_utils.basic import *
import re
if __name__ == '__main__':
main()
| gpl-3.0 |
modesttree/Projeny | Source/mtm/log/LogStreamConsole.py | 1 | 4086 |
import os
import re
import sys
from mtm.ioc.Inject import Inject
import mtm.util.Util as Util
from mtm.log.Logger import LogType
import shutil
from mtm.util.Assert import *
import mtm.log.ColorConsole as ColorConsole
class AnsiiCodes:
    """ANSI terminal escape codes for coloured console output.

    The plain names select bold/bright intensity (SGR attribute 1), the
    DARK-prefixed names select normal intensity (SGR attribute 0), and
    END resets all attributes.
    """
    BLACK = "\033[1;30m"
    DARKBLACK = "\033[0;30m"
    RED = "\033[1;31m"
    DARKRED = "\033[0;31m"
    GREEN = "\033[1;32m"
    DARKGREEN = "\033[0;32m"
    YELLOW = "\033[1;33m"
    DARKYELLOW = "\033[0;33m"
    BLUE = "\033[1;34m"
    DARKBLUE = "\033[0;34m"
    MAGENTA = "\033[1;35m"
    DARKMAGENTA = "\033[0;35m"
    CYAN = "\033[1;36m"
    DARKCYAN = "\033[0;36m"
    WHITE = "\033[1;37m"
    DARKWHITE = "\033[0;37m"
    END = "\033[0;0m"
class LogStreamConsole:
    """Log sink that writes messages to the console (and optionally a file).

    Verbosity gates Debug/Noise messages, errors go to stderr, and colours
    are applied via ColorConsole when enabled in the configuration.
    Dependencies are injected by name through the project's IoC container.
    """
    _log = Inject('Logger')
    _sys = Inject('SystemHelper')
    _varManager = Inject('VarManager')
    _config = Inject('Config')
    def __init__(self, verbose, veryVerbose):
        """Read colour/file settings from config and set verbosity flags."""
        self._verbose = verbose or veryVerbose
        self._veryVerbose = veryVerbose
        self._useColors = self._config.tryGetBool(False, 'LogStreamConsole', 'UseColors')
        self._fileStream = None
        if self._config.tryGetBool(False, 'LogStreamConsole', 'OutputToFilteredLog'):
            self._fileStream = self._getFileStream()
        if self._useColors:
            self._initColors()
    def _initColors(self):
        """Capture the console's current attributes so they can be restored."""
        self._defaultColors = ColorConsole.get_text_attr()
        self._defaultBg = self._defaultColors & 0x0070
        self._defaultFg = self._defaultColors & 0x0007
    def log(self, logType, message):
        """Write one message, honouring verbosity filters and stream choice."""
        assertIsNotNone(logType)
        # Noise needs -vv, Debug needs -v; otherwise the message is dropped.
        if logType == LogType.Noise and not self._veryVerbose:
            return
        if logType == LogType.Debug and not self._verbose:
            return
        if logType == LogType.Error:
            self._output(logType, message, sys.stderr, self._useColors)
        else:
            self._output(logType, message, sys.stdout, self._useColors)
        # The filtered log file receives everything, always without colours.
        if self._fileStream:
            self._output(logType, message, self._fileStream, False)
    def _getFileStream(self):
        """Open the filtered log file, rotating the previous one if configured."""
        primaryPath = self._varManager.expand('[LogFilteredPath]')
        if not primaryPath:
            raise Exception("Could not find path for log file")
        previousPath = None
        if self._varManager.hasKey('LogFilteredPreviousPath'):
            previousPath = self._varManager.expand('[LogFilteredPreviousPath]')
        # Keep one old build log
        if os.path.isfile(primaryPath) and previousPath:
            shutil.copy2(primaryPath, previousPath)
        return open(primaryPath, 'w', encoding='utf-8', errors='ignore')
    def _getHeadingIndent(self):
        """Indent messages by the logger's current heading nesting depth."""
        return self._log.getCurrentNumHeadings() * "    "
    def _output(self, logType, message, stream, useColors):
        """Write one indented message to `stream`, optionally colourised."""
        stream.write('\n')
        stream.write(self._getHeadingIndent())
        if not useColors or logType == LogType.Info:
            stream.write(message)
            stream.flush()
        else:
            # Switch console attributes just for this message, then restore.
            ColorConsole.set_text_attr(self._getColorAttrs(logType))
            stream.write(message)
            stream.flush()
            ColorConsole.set_text_attr(self._defaultColors)
    def _getColorAttrs(self, logType):
        """Map a LogType to console colour attributes (keeping the default background)."""
        if logType == LogType.HeadingStart:
            return ColorConsole.FOREGROUND_CYAN | self._defaultBg | ColorConsole.FOREGROUND_INTENSITY
        if logType == LogType.HeadingEnd:
            return ColorConsole.FOREGROUND_BLACK | self._defaultBg | ColorConsole.FOREGROUND_INTENSITY
        if logType == LogType.Good:
            return ColorConsole.FOREGROUND_GREEN | self._defaultBg | ColorConsole.FOREGROUND_INTENSITY
        if logType == LogType.Warn:
            return ColorConsole.FOREGROUND_YELLOW | self._defaultBg | ColorConsole.FOREGROUND_INTENSITY
        if logType == LogType.Error:
            return ColorConsole.FOREGROUND_RED | self._defaultBg | ColorConsole.FOREGROUND_INTENSITY
        assertThat(logType == LogType.Debug or logType == LogType.Noise)
        return ColorConsole.FOREGROUND_BLACK | self._defaultBg | ColorConsole.FOREGROUND_INTENSITY
| mit |
snewell/wlint | lib/wlint/wordcounter.py | 2 | 1837 | #!/usr/bin/python3
"""Functions to count words."""
import re
_RIGHT_SINGLE_QUOTE = "’"
_PATTERN = re.compile(r"\b([\w\-\'{}]+)\b".format(_RIGHT_SINGLE_QUOTE))


def count_line(text):
    """
    Count the number of words in a line.

    A word is any run of word characters (``\\w``), hyphens, straight
    single quotes, or right typographer's single quotes (the latter two
    act as apostrophes), delimited by word boundaries (``\\b``).

    Despite the name, the text may span multiple lines.

    Arguments:
    text -- text to count words in

    Return:
    A tuple containing:
     - a dictionary of every word and its count
     - the total number of words
    """
    counts = {}
    total = 0
    for match in _PATTERN.finditer(text):
        word = match.group(1)
        counts[word] = counts.get(word, 0) + 1
        total += 1
    return (counts, total)
def _update_full_counts(full_counts, line_counts):
for word, count in line_counts[0].items():
new_count = full_counts.get(word, 0) + count
full_counts[word] = new_count
def count_lines(handle):
    """
    Count the number of words in an iterable object.

    Arguments:
    handle -- something to iterate over, yielding text

    Return:
    A tuple containing:
     - a dictionary of every word and its count
     - the total number of words
    """
    full_counts = {}
    total_word_count = 0
    for text in handle:
        line_result = count_line(text)
        _update_full_counts(full_counts, line_result)
        total_word_count += line_result[1]
    return (full_counts, total_word_count)
ofir123/CouchPotatoServer | libs/suds/store.py | 204 | 18425 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Contains XML text for documents to be distributed
with the suds lib. Also, contains classes for accessing
these documents.
"""
from StringIO import StringIO
from logging import getLogger
log = getLogger(__name__)
#
# Soap section 5 encoding schema.
#
encoding = \
"""<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns:tns="http://schemas.xmlsoap.org/soap/encoding/" targetNamespace="http://schemas.xmlsoap.org/soap/encoding/">
<xs:attribute name="root">
<xs:annotation>
<xs:documentation>
'root' can be used to distinguish serialization roots from other
elements that are present in a serialization but are not roots of
a serialized value graph
</xs:documentation>
</xs:annotation>
<xs:simpleType>
<xs:restriction base="xs:boolean">
<xs:pattern value="0|1"/>
</xs:restriction>
</xs:simpleType>
</xs:attribute>
<xs:attributeGroup name="commonAttributes">
<xs:annotation>
<xs:documentation>
Attributes common to all elements that function as accessors or
represent independent (multi-ref) values. The href attribute is
intended to be used in a manner like CONREF. That is, the element
content should be empty iff the href attribute appears
</xs:documentation>
</xs:annotation>
<xs:attribute name="id" type="xs:ID"/>
<xs:attribute name="href" type="xs:anyURI"/>
<xs:anyAttribute namespace="##other" processContents="lax"/>
</xs:attributeGroup>
<!-- Global Attributes. The following attributes are intended to be usable via qualified attribute names on any complex type referencing them. -->
<!-- Array attributes. Needed to give the type and dimensions of an array's contents, and the offset for partially-transmitted arrays. -->
<xs:simpleType name="arrayCoordinate">
<xs:restriction base="xs:string"/>
</xs:simpleType>
<xs:attribute name="arrayType" type="xs:string"/>
<xs:attribute name="offset" type="tns:arrayCoordinate"/>
<xs:attributeGroup name="arrayAttributes">
<xs:attribute ref="tns:arrayType"/>
<xs:attribute ref="tns:offset"/>
</xs:attributeGroup>
<xs:attribute name="position" type="tns:arrayCoordinate"/>
<xs:attributeGroup name="arrayMemberAttributes">
<xs:attribute ref="tns:position"/>
</xs:attributeGroup>
<xs:group name="Array">
<xs:sequence>
<xs:any namespace="##any" minOccurs="0" maxOccurs="unbounded" processContents="lax"/>
</xs:sequence>
</xs:group>
<xs:element name="Array" type="tns:Array"/>
<xs:complexType name="Array">
<xs:annotation>
<xs:documentation>
'Array' is a complex type for accessors identified by position
</xs:documentation>
</xs:annotation>
<xs:group ref="tns:Array" minOccurs="0"/>
<xs:attributeGroup ref="tns:arrayAttributes"/>
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:complexType>
<!-- 'Struct' is a complex type for accessors identified by name.
Constraint: No element may be have the same name as any other,
nor may any element have a maxOccurs > 1. -->
<xs:element name="Struct" type="tns:Struct"/>
<xs:group name="Struct">
<xs:sequence>
<xs:any namespace="##any" minOccurs="0" maxOccurs="unbounded" processContents="lax"/>
</xs:sequence>
</xs:group>
<xs:complexType name="Struct">
<xs:group ref="tns:Struct" minOccurs="0"/>
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:complexType>
<!-- 'Base64' can be used to serialize binary data using base64 encoding
as defined in RFC2045 but without the MIME line length limitation. -->
<xs:simpleType name="base64">
<xs:restriction base="xs:base64Binary"/>
</xs:simpleType>
<!-- Element declarations corresponding to each of the simple types in the
XML Schemas Specification. -->
<xs:element name="duration" type="tns:duration"/>
<xs:complexType name="duration">
<xs:simpleContent>
<xs:extension base="xs:duration">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="dateTime" type="tns:dateTime"/>
<xs:complexType name="dateTime">
<xs:simpleContent>
<xs:extension base="xs:dateTime">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="NOTATION" type="tns:NOTATION"/>
<xs:complexType name="NOTATION">
<xs:simpleContent>
<xs:extension base="xs:QName">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="time" type="tns:time"/>
<xs:complexType name="time">
<xs:simpleContent>
<xs:extension base="xs:time">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="date" type="tns:date"/>
<xs:complexType name="date">
<xs:simpleContent>
<xs:extension base="xs:date">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="gYearMonth" type="tns:gYearMonth"/>
<xs:complexType name="gYearMonth">
<xs:simpleContent>
<xs:extension base="xs:gYearMonth">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="gYear" type="tns:gYear"/>
<xs:complexType name="gYear">
<xs:simpleContent>
<xs:extension base="xs:gYear">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="gMonthDay" type="tns:gMonthDay"/>
<xs:complexType name="gMonthDay">
<xs:simpleContent>
<xs:extension base="xs:gMonthDay">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="gDay" type="tns:gDay"/>
<xs:complexType name="gDay">
<xs:simpleContent>
<xs:extension base="xs:gDay">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="gMonth" type="tns:gMonth"/>
<xs:complexType name="gMonth">
<xs:simpleContent>
<xs:extension base="xs:gMonth">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="boolean" type="tns:boolean"/>
<xs:complexType name="boolean">
<xs:simpleContent>
<xs:extension base="xs:boolean">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="base64Binary" type="tns:base64Binary"/>
<xs:complexType name="base64Binary">
<xs:simpleContent>
<xs:extension base="xs:base64Binary">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="hexBinary" type="tns:hexBinary"/>
<xs:complexType name="hexBinary">
<xs:simpleContent>
<xs:extension base="xs:hexBinary">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="float" type="tns:float"/>
<xs:complexType name="float">
<xs:simpleContent>
<xs:extension base="xs:float">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="double" type="tns:double"/>
<xs:complexType name="double">
<xs:simpleContent>
<xs:extension base="xs:double">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="anyURI" type="tns:anyURI"/>
<xs:complexType name="anyURI">
<xs:simpleContent>
<xs:extension base="xs:anyURI">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="QName" type="tns:QName"/>
<xs:complexType name="QName">
<xs:simpleContent>
<xs:extension base="xs:QName">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="string" type="tns:string"/>
<xs:complexType name="string">
<xs:simpleContent>
<xs:extension base="xs:string">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="normalizedString" type="tns:normalizedString"/>
<xs:complexType name="normalizedString">
<xs:simpleContent>
<xs:extension base="xs:normalizedString">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="token" type="tns:token"/>
<xs:complexType name="token">
<xs:simpleContent>
<xs:extension base="xs:token">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="language" type="tns:language"/>
<xs:complexType name="language">
<xs:simpleContent>
<xs:extension base="xs:language">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="Name" type="tns:Name"/>
<xs:complexType name="Name">
<xs:simpleContent>
<xs:extension base="xs:Name">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="NMTOKEN" type="tns:NMTOKEN"/>
<xs:complexType name="NMTOKEN">
<xs:simpleContent>
<xs:extension base="xs:NMTOKEN">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="NCName" type="tns:NCName"/>
<xs:complexType name="NCName">
<xs:simpleContent>
<xs:extension base="xs:NCName">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="NMTOKENS" type="tns:NMTOKENS"/>
<xs:complexType name="NMTOKENS">
<xs:simpleContent>
<xs:extension base="xs:NMTOKENS">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="ID" type="tns:ID"/>
<xs:complexType name="ID">
<xs:simpleContent>
<xs:extension base="xs:ID">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="IDREF" type="tns:IDREF"/>
<xs:complexType name="IDREF">
<xs:simpleContent>
<xs:extension base="xs:IDREF">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="ENTITY" type="tns:ENTITY"/>
<xs:complexType name="ENTITY">
<xs:simpleContent>
<xs:extension base="xs:ENTITY">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="IDREFS" type="tns:IDREFS"/>
<xs:complexType name="IDREFS">
<xs:simpleContent>
<xs:extension base="xs:IDREFS">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="ENTITIES" type="tns:ENTITIES"/>
<xs:complexType name="ENTITIES">
<xs:simpleContent>
<xs:extension base="xs:ENTITIES">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="decimal" type="tns:decimal"/>
<xs:complexType name="decimal">
<xs:simpleContent>
<xs:extension base="xs:decimal">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="integer" type="tns:integer"/>
<xs:complexType name="integer">
<xs:simpleContent>
<xs:extension base="xs:integer">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="nonPositiveInteger" type="tns:nonPositiveInteger"/>
<xs:complexType name="nonPositiveInteger">
<xs:simpleContent>
<xs:extension base="xs:nonPositiveInteger">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="negativeInteger" type="tns:negativeInteger"/>
<xs:complexType name="negativeInteger">
<xs:simpleContent>
<xs:extension base="xs:negativeInteger">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="long" type="tns:long"/>
<xs:complexType name="long">
<xs:simpleContent>
<xs:extension base="xs:long">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="int" type="tns:int"/>
<xs:complexType name="int">
<xs:simpleContent>
<xs:extension base="xs:int">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="short" type="tns:short"/>
<xs:complexType name="short">
<xs:simpleContent>
<xs:extension base="xs:short">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="byte" type="tns:byte"/>
<xs:complexType name="byte">
<xs:simpleContent>
<xs:extension base="xs:byte">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="nonNegativeInteger" type="tns:nonNegativeInteger"/>
<xs:complexType name="nonNegativeInteger">
<xs:simpleContent>
<xs:extension base="xs:nonNegativeInteger">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="unsignedLong" type="tns:unsignedLong"/>
<xs:complexType name="unsignedLong">
<xs:simpleContent>
<xs:extension base="xs:unsignedLong">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="unsignedInt" type="tns:unsignedInt"/>
<xs:complexType name="unsignedInt">
<xs:simpleContent>
<xs:extension base="xs:unsignedInt">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="unsignedShort" type="tns:unsignedShort"/>
<xs:complexType name="unsignedShort">
<xs:simpleContent>
<xs:extension base="xs:unsignedShort">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="unsignedByte" type="tns:unsignedByte"/>
<xs:complexType name="unsignedByte">
<xs:simpleContent>
<xs:extension base="xs:unsignedByte">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="positiveInteger" type="tns:positiveInteger"/>
<xs:complexType name="positiveInteger">
<xs:simpleContent>
<xs:extension base="xs:positiveInteger">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="anyType"/>
</xs:schema>
"""
class DocumentStore:
    """
    The I{suds} document store provides a local, in-memory repository
    for XML documents distributed with the suds library.
    @cvar protocol: The URL protocol for the store.
    @type protocol: str
    @cvar store: The mapping of URL location to documents.
    @type store: dict
    """
    protocol = 'suds'
    store = {
        'schemas.xmlsoap.org/soap/encoding/' : encoding
    }
    def open(self, url):
        """
        Open a document at the specified url.
        @param url: A document URL.
        @type url: str
        @return: A file pointer to the document, or None when the URL
            does not use this store's protocol.
        @rtype: StringIO
        """
        protocol, location = self.split(url)
        if protocol == self.protocol:
            return self.find(location)
        else:
            return None
    def find(self, location):
        """
        Find the specified location in the store.
        @param location: The I{location} part of a URL.
        @type location: str
        @return: An input stream to the document.
        @rtype: StringIO
        @raise Exception: when the location is not in the store.
        """
        try:
            content = self.store[location]
            return StringIO(content)
        except KeyError:
            # Catch only the expected lookup failure; the previous bare
            # except also swallowed KeyboardInterrupt/SystemExit.
            reason = 'location "%s" not in document store' % location
            raise Exception(reason)
    def split(self, url):
        """
        Split the url into I{protocol} and I{location}
        @param url: A URL.
        @type url: str
        @return: (I{protocol}, I{location})
        @rtype: tuple
        """
        parts = url.split('://', 1)
        if len(parts) == 2:
            return parts
        else:
            return (None, url)
j4/horizon | openstack_dashboard/dashboards/project/access_and_security/floating_ips/views.py | 65 | 2958 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing floating IPs.
"""
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from neutronclient.common import exceptions as neutron_exc
from horizon import exceptions
from horizon import forms
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.usage import quotas
from openstack_dashboard.dashboards.project.access_and_security.\
floating_ips import forms as project_forms
from openstack_dashboard.dashboards.project.access_and_security.\
floating_ips import workflows as project_workflows
class AssociateView(workflows.WorkflowView):
    """Launch the workflow that associates a floating IP."""
    workflow_class = project_workflows.IPAssociationWorkflow
class AllocateView(forms.ModalFormView):
    """Modal form view for allocating a floating IP to the project."""

    form_class = project_forms.FloatingIpAllocate
    form_id = "associate_floating_ip_form"
    modal_header = _("Allocate Floating IP")
    template_name = 'project/access_and_security/floating_ips/allocate.html'
    submit_label = _("Allocate IP")
    submit_url = reverse_lazy(
        "horizon:project:access_and_security:floating_ips:allocate")
    success_url = reverse_lazy('horizon:project:access_and_security:index')

    def get_object_display(self, obj):
        """Represent the allocated floating IP by its address."""
        return obj.ip

    def get_context_data(self, **kwargs):
        """Attach project quota usages to the template context."""
        context = super(AllocateView, self).get_context_data(**kwargs)
        try:
            usages = quotas.tenant_quota_usages(self.request)
        except Exception:
            exceptions.handle(self.request)
        else:
            context['usages'] = usages
        return context

    def get_initial(self):
        """Build the floating IP pool choices offered by the form."""
        pools = []
        try:
            pools = api.network.floating_ip_pools_list(self.request)
        except neutron_exc.ConnectionFailed:
            exceptions.handle(self.request)
        except Exception:
            exceptions.handle(self.request,
                              _("Unable to retrieve floating IP pools."))
        choices = [(pool.id, pool.name) for pool in pools]
        if not choices:
            choices = [(None, _("No floating IP pools available"))]
        return {'pool_list': choices}
| apache-2.0 |
xrg/django-static-gitified | tests/regressiontests/templates/unicode.py | 84 | 1290 | # -*- coding: utf-8 -*-
from django.template import Template, TemplateEncodingError, Context
from django.utils.safestring import SafeData
from django.utils.unittest import TestCase
class UnicodeTests(TestCase):
    """Templates and contexts accept unicode strings and UTF-8
    bytestrings interchangeably, and render to safe unicode output."""
    def test_template(self):
        # Templates can be created from unicode strings.
        t1 = Template(u'ŠĐĆŽćžšđ {{ var }}')
        # Templates can also be created from bytestrings. These are assumed to
        # be encoded using UTF-8.
        s = '\xc5\xa0\xc4\x90\xc4\x86\xc5\xbd\xc4\x87\xc5\xbe\xc5\xa1\xc4\x91 {{ var }}'
        t2 = Template(s)
        # A bytestring that is not valid UTF-8 must be rejected.
        s = '\x80\xc5\xc0'
        self.assertRaises(TemplateEncodingError, Template, s)
        # Contexts can be constructed from unicode or UTF-8 bytestrings.
        c1 = Context({"var": "foo"})
        c2 = Context({u"var": "foo"})
        c3 = Context({"var": u"Đđ"})
        c4 = Context({u"var": "\xc4\x90\xc4\x91"})
        # Since both templates and all four contexts represent the same thing,
        # they all render the same (and are returned as unicode objects and
        # "safe" objects as well, for auto-escaping purposes).
        self.assertEqual(t1.render(c3), t2.render(c3))
        self.assertIsInstance(t1.render(c3), unicode)
        self.assertIsInstance(t1.render(c3), SafeData)
| bsd-3-clause |
midma101/AndIWasJustGoingToBed | .venv/lib/python2.7/site-packages/Crypto/Random/__init__.py | 126 | 1669 | # -*- coding: utf-8 -*-
#
# Random/__init__.py : PyCrypto random number generation
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
__revision__ = "$Id$"
__all__ = ['new']
from Crypto.Random import OSRNG
from Crypto.Random import _UserFriendlyRNG
def new(*args, **kwargs):
    """Return a file-like object that outputs cryptographically random bytes.

    All positional and keyword arguments are forwarded to
    _UserFriendlyRNG.new().
    """
    return _UserFriendlyRNG.new(*args, **kwargs)
def atfork():
    """Call this whenever you call os.fork()

    Reinitializes the user-friendly RNG — presumably so parent and child
    do not share RNG state after the fork; see _UserFriendlyRNG.reinit.
    """
    _UserFriendlyRNG.reinit()
def get_random_bytes(n):
    """Return the specified number of cryptographically-strong random bytes.

    n -- number of bytes to return.
    """
    return _UserFriendlyRNG.get_random_bytes(n)
# vim:set ts=4 sw=4 sts=4 expandtab:
| mit |
TRESCLOUD/odoopub | extra-addons/sale_journal_sequence/sale.py | 4 | 1720 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 Cubic ERP - Teradata SAC (<http://cubicerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv, fields
class sale_order(osv.Model):
    """Override sale.order creation so the order number is drawn from
    the journal invoice type's own sequence when one is configured."""
    _inherit = 'sale.order'

    def create(self, cr, uid, vals, context=None):
        """Fill in vals['name'] from the invoice type's sequence, or the
        default 'sale_order' sequence, when no explicit name was given."""
        needs_name = 'name' not in vals or vals.get('name') == '/'
        if needs_name:
            invoice_type_id = vals.get('invoice_type_id', False)
            if invoice_type_id:
                invoice_type = self.pool.get('sale_journal.invoice.type').browse(
                    cr, uid, invoice_type_id, context=context)
                if invoice_type.sequence_id:
                    vals['name'] = self.pool.get('ir.sequence').get_id(
                        cr, uid, invoice_type.sequence_id.id)
            else:
                vals['name'] = self.pool.get('ir.sequence').get(
                    cr, uid, 'sale_order') or '/'
        return super(sale_order, self).create(cr, uid, vals, context=context)
| agpl-3.0 |
HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/VTK/Filters/Sources/Testing/Python/TestButtonSource.py | 20 | 2447 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Test the button source
# The image to map on the button
r = vtk.vtkJPEGReader()
r.SetFileName(VTK_DATA_ROOT + "/Data/beach.jpg")
r.Update()
t = vtk.vtkTexture()
t.SetInputConnection(r.GetOutputPort())
dims = r.GetOutput().GetDimensions()
d1 = dims[0]
d2 = dims[1]
# The first elliptical button
bs = vtk.vtkEllipticalButtonSource()
bs.SetWidth(2)
bs.SetHeight(1)
bs.SetDepth(0.2)
bs.SetCircumferentialResolution(64)
bs.SetRadialRatio(1.1)
bs.SetShoulderResolution(8)
bs.SetTextureResolution(4)
bs.TwoSidedOn()
bMapper = vtk.vtkPolyDataMapper()
bMapper.SetInputConnection(bs.GetOutputPort())
b1 = vtk.vtkActor()
b1.SetMapper(bMapper)
b1.SetTexture(t)
# The second elliptical button
bs2 = vtk.vtkEllipticalButtonSource()
bs2.SetWidth(2)
bs2.SetHeight(1)
bs2.SetDepth(0.2)
bs2.SetCircumferentialResolution(64)
bs2.SetRadialRatio(1.1)
bs2.SetShoulderResolution(8)
bs2.SetTextureResolution(4)
bs2.TwoSidedOn()
bs2.SetCenter(2, 0, 0)
bs2.SetTextureStyleToFitImage()
bs2.SetTextureDimensions(d1, d2)
b2Mapper = vtk.vtkPolyDataMapper()
b2Mapper.SetInputConnection(bs2.GetOutputPort())
b2 = vtk.vtkActor()
b2.SetMapper(b2Mapper)
b2.SetTexture(t)
# The third rectangular button
bs3 = vtk.vtkRectangularButtonSource()
bs3.SetWidth(1.5)
bs3.SetHeight(0.75)
bs3.SetDepth(0.2)
bs3.TwoSidedOn()
bs3.SetCenter(0, 1, 0)
bs3.SetTextureDimensions(d1, d2)
b3Mapper = vtk.vtkPolyDataMapper()
b3Mapper.SetInputConnection(bs3.GetOutputPort())
b3 = vtk.vtkActor()
b3.SetMapper(b3Mapper)
b3.SetTexture(t)
# The fourth rectangular button
bs4 = vtk.vtkRectangularButtonSource()
bs4.SetWidth(1.5)
bs4.SetHeight(0.75)
bs4.SetDepth(0.2)
bs4.TwoSidedOn()
bs4.SetCenter(2, 1, 0)
bs4.SetTextureStyleToFitImage()
bs4.SetTextureDimensions(d1, d2)
b4Mapper = vtk.vtkPolyDataMapper()
b4Mapper.SetInputConnection(bs4.GetOutputPort())
b4 = vtk.vtkActor()
b4.SetMapper(b4Mapper)
b4.SetTexture(t)
# Create the RenderWindow, Renderer and Interactive Renderer
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren1.AddActor(b1)
ren1.AddActor(b2)
ren1.AddActor(b3)
ren1.AddActor(b4)
ren1.SetBackground(0, 0, 0)
renWin.SetSize(250, 150)
renWin.Render()
ren1.GetActiveCamera().Zoom(1.5)
renWin.Render()
iren.Initialize()
#iren.Start()
| gpl-3.0 |
MinimalOS/external_skia | tools/git_utils.py | 68 | 5453 | # Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module to host the ChangeGitBranch class and test_git_executable function.
"""
import os
import subprocess
import misc_utils
class ChangeGitBranch(object):
    """Class to manage git branches.
    This class allows one to create a new branch in a repository based
    off of a given commit, and restore the original tree state.
    Assumes current working directory is a git repository.
    Example:
        with ChangeGitBranch():
            edit_files(files)
            git_add(files)
            git_commit()
            git_format_patch('HEAD~')
        # At this point, the repository is returned to its original
        # state.
    Constructor Args:
        branch_name: (string) if not None, the name of the branch to
            use. If None, then use a temporary branch that will be
            deleted. If the branch already exists, then a different
            branch name will be created. Use git_branch_name() to
            find the actual branch name used.
        upstream_branch: (string) if not None, the name of the branch or
            commit to branch from. If None, then use origin/master
        verbose: (boolean) if true, makes debugging easier.
    Raises:
        OSError: the git executable disappeared.
        subprocess.CalledProcessError: git returned unexpected status.
        Exception: if the given branch name exists, or if the repository
            isn't clean on exit, or git can't be found.
    """
    # pylint: disable=I0011,R0903,R0902
    def __init__(self,
                 branch_name=None,
                 upstream_branch=None,
                 verbose=False):
        # pylint: disable=I0011,R0913
        if branch_name:
            self._branch_name = branch_name
            self._delete_branch = False
        else:
            self._branch_name = 'ChangeGitBranchTempBranch'
            self._delete_branch = True
        if upstream_branch:
            self._upstream_branch = upstream_branch
        else:
            self._upstream_branch = 'origin/master'
        self._git = git_executable()
        if not self._git:
            raise Exception('Git can\'t be found.')
        self._stash = None
        self._original_branch = None
        self._vsp = misc_utils.VerboseSubprocess(verbose)
    def _has_git_diff(self):
        """Return true iff repository has uncommited changes."""
        return bool(self._vsp.call([self._git, 'diff', '--quiet', 'HEAD']))
    def _branch_exists(self, branch):
        """Return true iff branch exists."""
        return 0 == self._vsp.call([self._git, 'show-ref', '--quiet', branch])
    def __enter__(self):
        """Stash uncommitted changes, remember the current branch, and
        check out a new branch off of the upstream branch."""
        git, vsp = self._git, self._vsp
        if self._branch_exists(self._branch_name):
            # Requested name is taken: append a numeric suffix until unique.
            i, branch_name = 0, self._branch_name
            while self._branch_exists(branch_name):
                i += 1
                branch_name = '%s_%03d' % (self._branch_name, i)
            self._branch_name = branch_name
        self._stash = self._has_git_diff()
        if self._stash:
            vsp.check_call([git, 'stash', 'save'])
        self._original_branch = git_branch_name(vsp.verbose)
        vsp.check_call(
            [git, 'checkout', '-q', '-b',
             self._branch_name, self._upstream_branch])
    def __exit__(self, etype, value, traceback):
        """Return to the original branch, pop the stash (if any), and
        delete the temporary branch; refuses to proceed on a dirty tree."""
        git, vsp = self._git, self._vsp
        if self._has_git_diff():
            status = vsp.check_output([git, 'status', '-s'])
            raise Exception('git checkout not clean:\n%s' % status)
        vsp.check_call([git, 'checkout', '-q', self._original_branch])
        if self._stash:
            vsp.check_call([git, 'stash', 'pop'])
        if self._delete_branch:
            assert self._original_branch != self._branch_name
            vsp.check_call([git, 'branch', '-D', self._branch_name])
def git_branch_name(verbose=False):
    """Return a description of the current branch.

    Args:
        verbose: (boolean) makes debugging easier
    Returns:
        A string suitable for passing to `git checkout` later.
    """
    runner = misc_utils.VerboseSubprocess(verbose)
    git = git_executable()
    try:
        ref = runner.strip_output([git, 'symbolic-ref', 'HEAD'])
    except subprocess.CalledProcessError:
        # Detached HEAD ("fatal: ref HEAD is not a symbolic ref"):
        # fall back to the commit hash.
        return runner.strip_output([git, 'rev-parse', 'HEAD'])
    return ref.split('/')[-1]
def test_git_executable(git):
"""Test the git executable.
Args:
git: git executable path.
Returns:
True if test is successful.
"""
with open(os.devnull, 'w') as devnull:
try:
subprocess.call([git, '--version'], stdout=devnull)
except (OSError,):
return False
return True
def git_executable():
    """Find the git executable.

    If the GIT_EXECUTABLE environment variable is set, that will
    override whatever is found in the PATH.
    If no suitable executable is found, return None

    Returns:
        A string suitable for passing to subprocess functions, or None.
    """
    override = os.environ.get('GIT_EXECUTABLE')
    candidates = [override] if override else []
    candidates.extend(['git', 'git.exe', 'git.bat'])
    for candidate in candidates:
        if test_git_executable(candidate):
            return candidate
    return None
| bsd-3-clause |
51reboot/actual_09_homework | 10/jinderui/cmdb/user/dbutils.py | 1 | 1788 | # encoding: utf-8
import os,sys
reload(sys)
sys.setdefaultencoding( "utf-8" )
import gconf
import MySQLdb
# encoding: utf-8
import os,sys
reload(sys)
sys.setdefaultencoding( "utf-8" )
import gconf
import MySQLdb
class MySQLConnection(object):
"""docstring for MySQLConnection"""
def __init__(self, host,port,user,passwd,db,charset='utf8'):
self.__host = host
self.__port = port
self.__user = user
self.__passwd = passwd
self.__db = db
self.__charset = charset
self.__conn = None
self.__cur = None
self.__connect()
def __connect(self):
try:
self.__conn = MySQLdb.connect(host=self.__host,port=self.__port, user=self.__user, \
passwd = self.__passwd,db = self.__db,charset=self.__charset)
self.__cur = self.__conn.cursor()
except BaseException as e:
print e
def commit(self):
if self.__conn:
self.__conn.commit()
def execute(self,sql,args=()):
_cnt = 0
if self.__cur:
_cnt = self.__cur.execute(sql,args)
return _cnt
def fetch(self,sql,args=()):
_cnt = 0
_rt_list = []
_cnt = self.execute(sql,args)
if self.__cur:
_rt_list = self.__cur.fetchall()
return _cnt, _rt_list
def close(self):
self.commit()
if self.__cur:
self.__cur.close()
self.__cur = None
if self.__conn:
self.__conn.close()
self.__conn =None
@classmethod
def execute_sql(self,sql,args=(),fetch=True):
_count =0
_rt_list =[]
_conn = MySQLConnection(host=gconf.MYSQL_HOST,port=gconf.MYSQL_PORT, \
db=gconf.MYSQL_DB,user=gconf.MYSQL_USER, passwd=gconf.MYSQL_PASSWORD,charset=gconf.MYSQL_CHARSET)
if fetch:
_count,_rt_list = _conn.fetch(sql,args)
else:
_count = _conn.execute(sql,args)
_conn.close()
return _count,_rt_list
if __name__ == '__main__':
print MySQLConnection.execute_sql('select * from user') | mit |
JohnGeorgiadis/x-formation-test-sample | node_modules/lite-server/node_modules/browser-sync/node_modules/socket.io/node_modules/engine.io/node_modules/engine.io-parser/node_modules/utf8/tests/generate-test-data.py | 1788 | 1435 | #!/usr/bin/env python
import re
import json
# https://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
# http://stackoverflow.com/a/13436167/96656
def unisymbol(codePoint):
    """Return the unicode symbol for a code point, encoding astral
    (non-BMP) code points as a UTF-16 surrogate pair.
    Returns the string 'Error' for values outside U+0000..U+10FFFF.
    """
    if 0x0000 <= codePoint <= 0xFFFF:
        # Basic Multilingual Plane: one code unit suffices.
        return unichr(codePoint)
    if 0x010000 <= codePoint <= 0x10FFFF:
        # https://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
        offset = codePoint - 0x10000
        high = int(offset / 0x400) + 0xD800
        low = int(offset % 0x400) + 0xDC00
        return unichr(high) + unichr(low)
    return 'Error'
def hexify(codePoint):
return 'U+' + hex(codePoint)[2:].upper().zfill(6)
def writeFile(filename, contents):
print filename
with open(filename, 'w') as f:
f.write(contents.strip() + '\n')
data = []
for codePoint in range(0x000000, 0x10FFFF + 1):
# Skip non-scalar values.
if codePoint >= 0xD800 and codePoint <= 0xDFFF:
continue
symbol = unisymbol(codePoint)
# http://stackoverflow.com/a/17199950/96656
bytes = symbol.encode('utf8').decode('latin1')
data.append({
'codePoint': codePoint,
'decoded': symbol,
'encoded': bytes
});
jsonData = json.dumps(data, sort_keys=False, indent=2, separators=(',', ': '))
# Use tabs instead of double spaces for indentation
jsonData = jsonData.replace(' ', '\t')
# Escape hexadecimal digits in escape sequences
jsonData = re.sub(
r'\\u([a-fA-F0-9]{4})',
lambda match: r'\u{}'.format(match.group(1).upper()),
jsonData
)
writeFile('data.json', jsonData)
| mit |
pjryan126/solid-start-careers | store/api/glassdoor/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/tokenizer.py | 1710 | 76929 | from __future__ import absolute_import, division, unicode_literals
try:
chr = unichr # flake8: noqa
except NameError:
pass
from collections import deque
from .constants import spaceCharacters
from .constants import entities
from .constants import asciiLetters, asciiUpper2Lower
from .constants import digits, hexDigits, EOF
from .constants import tokenTypes, tagTokenTypes
from .constants import replacementCharacters
from .inputstream import HTMLInputStream
from .trie import Trie
entitiesTrie = Trie(entities)
class HTMLTokenizer(object):
""" This class takes care of tokenizing HTML.
* self.currentToken
Holds the token that is currently being processed.
* self.state
Holds a reference to the method to be invoked... XXX
* self.stream
Points to HTMLInputStream object.
"""
def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
lowercaseElementName=True, lowercaseAttrName=True, parser=None):
self.stream = HTMLInputStream(stream, encoding, parseMeta, useChardet)
self.parser = parser
# Perform case conversions?
self.lowercaseElementName = lowercaseElementName
self.lowercaseAttrName = lowercaseAttrName
# Setup the initial tokenizer state
self.escapeFlag = False
self.lastFourChars = []
self.state = self.dataState
self.escape = False
# The current token being created
self.currentToken = None
super(HTMLTokenizer, self).__init__()
def __iter__(self):
""" This is where the magic happens.
We do our usually processing through the states and when we have a token
to return we yield the token which pauses processing until the next token
is requested.
"""
self.tokenQueue = deque([])
# Start processing. When EOF is reached self.state will return False
# instead of True and the loop will terminate.
while self.state():
while self.stream.errors:
yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)}
while self.tokenQueue:
yield self.tokenQueue.popleft()
def consumeNumberEntity(self, isHex):
"""This function returns either U+FFFD or the character based on the
decimal or hexadecimal representation. It also discards ";" if present.
If not present self.tokenQueue.append({"type": tokenTypes["ParseError"]}) is invoked.
"""
allowed = digits
radix = 10
if isHex:
allowed = hexDigits
radix = 16
charStack = []
# Consume all the characters that are in range while making sure we
# don't hit an EOF.
c = self.stream.char()
while c in allowed and c is not EOF:
charStack.append(c)
c = self.stream.char()
# Convert the set of characters consumed to an int.
charAsInt = int("".join(charStack), radix)
# Certain characters get replaced with others
if charAsInt in replacementCharacters:
char = replacementCharacters[charAsInt]
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
elif ((0xD800 <= charAsInt <= 0xDFFF) or
(charAsInt > 0x10FFFF)):
char = "\uFFFD"
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
else:
# Should speed up this check somehow (e.g. move the set to a constant)
if ((0x0001 <= charAsInt <= 0x0008) or
(0x000E <= charAsInt <= 0x001F) or
(0x007F <= charAsInt <= 0x009F) or
(0xFDD0 <= charAsInt <= 0xFDEF) or
charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE,
0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE,
0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE,
0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE,
0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE,
0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE,
0xFFFFF, 0x10FFFE, 0x10FFFF])):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
try:
# Try/except needed as UCS-2 Python builds' unichar only works
# within the BMP.
char = chr(charAsInt)
except ValueError:
v = charAsInt - 0x10000
char = chr(0xD800 | (v >> 10)) + chr(0xDC00 | (v & 0x3FF))
# Discard the ; if present. Otherwise, put it back on the queue and
# invoke parseError on parser.
if c != ";":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"numeric-entity-without-semicolon"})
self.stream.unget(c)
return char
def consumeEntity(self, allowedChar=None, fromAttribute=False):
# Initialise to the default output for when no entity is matched
output = "&"
charStack = [self.stream.char()]
if (charStack[0] in spaceCharacters or charStack[0] in (EOF, "<", "&")
or (allowedChar is not None and allowedChar == charStack[0])):
self.stream.unget(charStack[0])
elif charStack[0] == "#":
# Read the next character to see if it's hex or decimal
hex = False
charStack.append(self.stream.char())
if charStack[-1] in ("x", "X"):
hex = True
charStack.append(self.stream.char())
# charStack[-1] should be the first digit
if (hex and charStack[-1] in hexDigits) \
or (not hex and charStack[-1] in digits):
# At least one digit found, so consume the whole number
self.stream.unget(charStack[-1])
output = self.consumeNumberEntity(hex)
else:
# No digits found
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "expected-numeric-entity"})
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
else:
# At this point in the process might have named entity. Entities
# are stored in the global variable "entities".
#
# Consume characters and compare to these to a substring of the
# entity names in the list until the substring no longer matches.
while (charStack[-1] is not EOF):
if not entitiesTrie.has_keys_with_prefix("".join(charStack)):
break
charStack.append(self.stream.char())
# At this point we have a string that starts with some characters
# that may match an entity
# Try to find the longest entity the string will match to take care
# of ¬i for instance.
try:
entityName = entitiesTrie.longest_prefix("".join(charStack[:-1]))
entityLength = len(entityName)
except KeyError:
entityName = None
if entityName is not None:
if entityName[-1] != ";":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"named-entity-without-semicolon"})
if (entityName[-1] != ";" and fromAttribute and
(charStack[entityLength] in asciiLetters or
charStack[entityLength] in digits or
charStack[entityLength] == "=")):
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
else:
output = entities[entityName]
self.stream.unget(charStack.pop())
output += "".join(charStack[entityLength:])
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-named-entity"})
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
if fromAttribute:
self.currentToken["data"][-1][1] += output
else:
if output in spaceCharacters:
tokenType = "SpaceCharacters"
else:
tokenType = "Characters"
self.tokenQueue.append({"type": tokenTypes[tokenType], "data": output})
def processEntityInAttribute(self, allowedChar):
"""This method replaces the need for "entityInAttributeValueState".
"""
self.consumeEntity(allowedChar=allowedChar, fromAttribute=True)
def emitCurrentToken(self):
"""This method is a generic handler for emitting the tags. It also sets
the state to "data" because that's what's needed after a token has been
emitted.
"""
token = self.currentToken
# Add token to the queue to be yielded
if (token["type"] in tagTokenTypes):
if self.lowercaseElementName:
token["name"] = token["name"].translate(asciiUpper2Lower)
if token["type"] == tokenTypes["EndTag"]:
if token["data"]:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "attributes-in-end-tag"})
if token["selfClosing"]:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "self-closing-flag-on-end-tag"})
self.tokenQueue.append(token)
self.state = self.dataState
# Below are the various tokenizer states worked out.
def dataState(self):
data = self.stream.char()
if data == "&":
self.state = self.entityDataState
elif data == "<":
self.state = self.tagOpenState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\u0000"})
elif data is EOF:
# Tokenization ends.
return False
elif data in spaceCharacters:
# Directly after emitting a token you switch back to the "data
# state". At that point spaceCharacters are important so they are
# emitted separately.
self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
data + self.stream.charsUntil(spaceCharacters, True)})
# No need to update lastFourChars here, since the first space will
# have already been appended to lastFourChars and will have broken
# any <!-- or --> sequences
else:
chars = self.stream.charsUntil(("&", "<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def entityDataState(self):
self.consumeEntity()
self.state = self.dataState
return True
def rcdataState(self):
data = self.stream.char()
if data == "&":
self.state = self.characterReferenceInRcdata
elif data == "<":
self.state = self.rcdataLessThanSignState
elif data == EOF:
# Tokenization ends.
return False
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data in spaceCharacters:
# Directly after emitting a token you switch back to the "data
# state". At that point spaceCharacters are important so they are
# emitted separately.
self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
data + self.stream.charsUntil(spaceCharacters, True)})
# No need to update lastFourChars here, since the first space will
# have already been appended to lastFourChars and will have broken
# any <!-- or --> sequences
else:
chars = self.stream.charsUntil(("&", "<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def characterReferenceInRcdata(self):
self.consumeEntity()
self.state = self.rcdataState
return True
def rawtextState(self):
data = self.stream.char()
if data == "<":
self.state = self.rawtextLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
# Tokenization ends.
return False
else:
chars = self.stream.charsUntil(("<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def scriptDataState(self):
data = self.stream.char()
if data == "<":
self.state = self.scriptDataLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
# Tokenization ends.
return False
else:
chars = self.stream.charsUntil(("<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def plaintextState(self):
data = self.stream.char()
if data == EOF:
# Tokenization ends.
return False
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + self.stream.charsUntil("\u0000")})
return True
def tagOpenState(self):
data = self.stream.char()
if data == "!":
self.state = self.markupDeclarationOpenState
elif data == "/":
self.state = self.closeTagOpenState
elif data in asciiLetters:
self.currentToken = {"type": tokenTypes["StartTag"],
"name": data, "data": [],
"selfClosing": False,
"selfClosingAcknowledged": False}
self.state = self.tagNameState
elif data == ">":
# XXX In theory it could be something besides a tag name. But
# do we really care?
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name-but-got-right-bracket"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<>"})
self.state = self.dataState
elif data == "?":
# XXX In theory it could be something besides a tag name. But
# do we really care?
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name-but-got-question-mark"})
self.stream.unget(data)
self.state = self.bogusCommentState
else:
# XXX
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.dataState
return True
def closeTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.currentToken = {"type": tokenTypes["EndTag"], "name": data,
"data": [], "selfClosing": False}
self.state = self.tagNameState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-right-bracket"})
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-eof"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.state = self.dataState
else:
# XXX data can be _'_...
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-char",
"datavars": {"data": data}})
self.stream.unget(data)
self.state = self.bogusCommentState
return True
def tagNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == ">":
self.emitCurrentToken()
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-tag-name"})
self.state = self.dataState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] += "\uFFFD"
else:
self.currentToken["name"] += data
# (Don't use charsUntil here, because tag names are
# very short and it's faster to not do anything fancy)
return True
def rcdataLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.rcdataEndTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rcdataEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.rcdataEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rcdataEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rawtextLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.rawtextEndTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.rawtextState
return True
def rawtextEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.rawtextEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.rawtextState
return True
def rawtextEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.rawtextState
return True
def scriptDataLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.scriptDataEndTagOpenState
elif data == "!":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<!"})
self.state = self.scriptDataEscapeStartState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.scriptDataEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapeStartState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapeStartDashState
else:
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapeStartDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashDashState
else:
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapedState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashState
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
self.state = self.dataState
else:
chars = self.stream.charsUntil(("<", "-", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def scriptDataEscapedDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashDashState
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataEscapedState
elif data == EOF:
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedDashDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
self.state = self.scriptDataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataEscapedState
elif data == EOF:
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.scriptDataEscapedEndTagOpenState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<" + data})
self.temporaryBuffer = data
self.state = self.scriptDataDoubleEscapeStartState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer = data
self.state = self.scriptDataEscapedEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataDoubleEscapeStartState(self):
data = self.stream.char()
if data in (spaceCharacters | frozenset(("/", ">"))):
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
if self.temporaryBuffer.lower() == "script":
self.state = self.scriptDataDoubleEscapedState
else:
self.state = self.scriptDataEscapedState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.temporaryBuffer += data
else:
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataDoubleEscapedState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataDoubleEscapedDashState
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
return True
def scriptDataDoubleEscapedDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataDoubleEscapedDashDashState
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataDoubleEscapedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapedDashDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
self.state = self.scriptDataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataDoubleEscapedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapedLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "/"})
self.temporaryBuffer = ""
self.state = self.scriptDataDoubleEscapeEndState
else:
self.stream.unget(data)
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapeEndState(self):
data = self.stream.char()
if data in (spaceCharacters | frozenset(("/", ">"))):
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
if self.temporaryBuffer.lower() == "script":
self.state = self.scriptDataEscapedState
else:
self.state = self.scriptDataDoubleEscapedState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.temporaryBuffer += data
else:
self.stream.unget(data)
self.state = self.scriptDataDoubleEscapedState
return True
def beforeAttributeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data in asciiLetters:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == ">":
self.emitCurrentToken()
elif data == "/":
self.state = self.selfClosingStartTagState
elif data in ("'", '"', "=", "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"invalid-character-in-attribute-name"})
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"].append(["\uFFFD", ""])
self.state = self.attributeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-name-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
return True
def attributeNameState(self):
data = self.stream.char()
leavingThisState = True
emitToken = False
if data == "=":
self.state = self.beforeAttributeValueState
elif data in asciiLetters:
self.currentToken["data"][-1][0] += data +\
self.stream.charsUntil(asciiLetters, True)
leavingThisState = False
elif data == ">":
# XXX If we emit here the attributes are converted to a dict
# without being checked and when the code below runs we error
# because data is a dict not a list
emitToken = True
elif data in spaceCharacters:
self.state = self.afterAttributeNameState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][0] += "\uFFFD"
leavingThisState = False
elif data in ("'", '"', "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"invalid-character-in-attribute-name"})
self.currentToken["data"][-1][0] += data
leavingThisState = False
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "eof-in-attribute-name"})
self.state = self.dataState
else:
self.currentToken["data"][-1][0] += data
leavingThisState = False
if leavingThisState:
# Attributes are not dropped at this stage. That happens when the
# start tag token is emitted so values can still be safely appended
# to attributes, but we do want to report the parse error in time.
if self.lowercaseAttrName:
self.currentToken["data"][-1][0] = (
self.currentToken["data"][-1][0].translate(asciiUpper2Lower))
for name, value in self.currentToken["data"][:-1]:
if self.currentToken["data"][-1][0] == name:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"duplicate-attribute"})
break
# XXX Fix for above XXX
if emitToken:
self.emitCurrentToken()
return True
def afterAttributeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data == "=":
self.state = self.beforeAttributeValueState
elif data == ">":
self.emitCurrentToken()
elif data in asciiLetters:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"].append(["\uFFFD", ""])
self.state = self.attributeNameState
elif data in ("'", '"', "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"invalid-character-after-attribute-name"})
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-end-of-tag-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
return True
    def beforeAttributeValueState(self):
        """Handle one character of the "before attribute value" state.

        Chooses the quoted or unquoted value state, reporting parse errors
        for characters that cannot start a value. Always returns True.
        """
        data = self.stream.char()
        if data in spaceCharacters:
            self.stream.charsUntil(spaceCharacters, True)
        elif data == "\"":
            self.state = self.attributeValueDoubleQuotedState
        elif data == "&":
            # Reconsume the '&' in the unquoted-value state so entity
            # processing happens there.
            self.state = self.attributeValueUnQuotedState
            self.stream.unget(data)
        elif data == "'":
            self.state = self.attributeValueSingleQuotedState
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-attribute-value-but-got-right-bracket"})
            self.emitCurrentToken()
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"][-1][1] += "\uFFFD"
            self.state = self.attributeValueUnQuotedState
        elif data in ("=", "<", "`"):
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "equals-in-unquoted-attribute-value"})
            self.currentToken["data"][-1][1] += data
            self.state = self.attributeValueUnQuotedState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-attribute-value-but-got-eof"})
            self.state = self.dataState
        else:
            self.currentToken["data"][-1][1] += data
            self.state = self.attributeValueUnQuotedState
        return True
    def attributeValueDoubleQuotedState(self):
        """Accumulate a double-quoted attribute value one chunk at a time.

        '"' closes the value, '&' triggers entity handling, NUL becomes
        U+FFFD; other characters are appended in bulk. Always returns True.
        """
        data = self.stream.char()
        if data == "\"":
            self.state = self.afterAttributeValueState
        elif data == "&":
            self.processEntityInAttribute('"')
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"][-1][1] += "\uFFFD"
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-attribute-value-double-quote"})
            self.state = self.dataState
        else:
            # Grab everything up to the next special character in one call.
            self.currentToken["data"][-1][1] += data +\
                self.stream.charsUntil(("\"", "&", "\u0000"))
        return True
    def attributeValueSingleQuotedState(self):
        """Accumulate a single-quoted attribute value; mirror of the
        double-quoted state with "'" as the terminator. Always returns True.
        """
        data = self.stream.char()
        if data == "'":
            self.state = self.afterAttributeValueState
        elif data == "&":
            self.processEntityInAttribute("'")
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"][-1][1] += "\uFFFD"
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-attribute-value-single-quote"})
            self.state = self.dataState
        else:
            # Bulk-append up to the next special character.
            self.currentToken["data"][-1][1] += data +\
                self.stream.charsUntil(("'", "&", "\u0000"))
        return True
    def attributeValueUnQuotedState(self):
        """Accumulate an unquoted attribute value.

        Whitespace ends the value, '>' emits the tag, quotes/'='/'<'/'`'
        are parse errors but are still appended. Always returns True.
        """
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.beforeAttributeNameState
        elif data == "&":
            self.processEntityInAttribute(">")
        elif data == ">":
            self.emitCurrentToken()
        elif data in ('"', "'", "=", "<", "`"):
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-character-in-unquoted-attribute-value"})
            self.currentToken["data"][-1][1] += data
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"][-1][1] += "\uFFFD"
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-attribute-value-no-quotes"})
            self.state = self.dataState
        else:
            # Bulk-append everything up to the next terminator or error char.
            self.currentToken["data"][-1][1] += data + self.stream.charsUntil(
                frozenset(("&", ">", '"', "'", "=", "<", "`", "\u0000")) | spaceCharacters)
        return True
    def afterAttributeValueState(self):
        """Handle the character following a quoted attribute value.

        Anything other than whitespace, '>' or '/' is a parse error and is
        reconsumed in the before-attribute-name state. Always returns True.
        """
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.beforeAttributeNameState
        elif data == ">":
            self.emitCurrentToken()
        elif data == "/":
            self.state = self.selfClosingStartTagState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-EOF-after-attribute-value"})
            self.stream.unget(data)
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-character-after-attribute-value"})
            self.stream.unget(data)
            self.state = self.beforeAttributeNameState
        return True
    def selfClosingStartTagState(self):
        """Handle the character after '/' in a tag.

        '>' marks the token self-closing and emits it; anything else is a
        parse error and gets reconsumed. Always returns True.
        """
        data = self.stream.char()
        if data == ">":
            self.currentToken["selfClosing"] = True
            self.emitCurrentToken()
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data":
                                    "unexpected-EOF-after-solidus-in-tag"})
            self.stream.unget(data)
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-character-after-solidus-in-tag"})
            self.stream.unget(data)
            self.state = self.beforeAttributeNameState
        return True
    def bogusCommentState(self):
        """Swallow malformed markup as a Comment token.

        Everything up to the next '>' (or EOF) becomes the comment data,
        with NUL replaced by U+FFFD. Always returns True.
        """
        # Make a new comment token and give it as value all the characters
        # until the first > or EOF (charsUntil checks for EOF automatically)
        # and emit it.
        data = self.stream.charsUntil(">")
        data = data.replace("\u0000", "\uFFFD")
        self.tokenQueue.append(
            {"type": tokenTypes["Comment"], "data": data})
        # Eat the character directly after the bogus comment which is either a
        # ">" or an EOF.
        self.stream.char()
        self.state = self.dataState
        return True
    def markupDeclarationOpenState(self):
        """Dispatch after "<!": comment ("--"), DOCTYPE, or CDATA section.

        Characters are read speculatively onto charStack; on a mismatch
        they are all ungot and the bogus-comment state takes over.
        Always returns True.
        """
        charStack = [self.stream.char()]
        if charStack[-1] == "-":
            charStack.append(self.stream.char())
            if charStack[-1] == "-":
                # "<!--": start of a comment.
                self.currentToken = {"type": tokenTypes["Comment"], "data": ""}
                self.state = self.commentStartState
                return True
        elif charStack[-1] in ('d', 'D'):
            # Try to match the remaining letters of "DOCTYPE", case-insensitively.
            matched = True
            for expected in (('o', 'O'), ('c', 'C'), ('t', 'T'),
                             ('y', 'Y'), ('p', 'P'), ('e', 'E')):
                charStack.append(self.stream.char())
                if charStack[-1] not in expected:
                    matched = False
                    break
            if matched:
                self.currentToken = {"type": tokenTypes["Doctype"],
                                     "name": "",
                                     "publicId": None, "systemId": None,
                                     "correct": True}
                self.state = self.doctypeState
                return True
        elif (charStack[-1] == "[" and
              self.parser is not None and
              self.parser.tree.openElements and
              self.parser.tree.openElements[-1].namespace != self.parser.tree.defaultNamespace):
            # "<![CDATA[" is only honoured in foreign (non-HTML) content.
            matched = True
            for expected in ["C", "D", "A", "T", "A", "["]:
                charStack.append(self.stream.char())
                if charStack[-1] != expected:
                    matched = False
                    break
            if matched:
                self.state = self.cdataSectionState
                return True
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-dashes-or-doctype"})
        # Push back everything we consumed so bogusCommentState sees it.
        while charStack:
            self.stream.unget(charStack.pop())
        self.state = self.bogusCommentState
        return True
    def commentStartState(self):
        """Handle the first character after "<!--". Always returns True."""
        data = self.stream.char()
        if data == "-":
            self.state = self.commentStartDashState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"] += "\uFFFD"
        elif data == ">":
            # "<!-->": an abruptly closed (empty) comment.
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "incorrect-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["data"] += data
            self.state = self.commentState
        return True
    def commentStartDashState(self):
        """Handle the character after "<!---" (one leading dash consumed).

        A second '-' moves toward the comment end; otherwise the pending
        '-' is flushed into the comment data. Always returns True.
        """
        data = self.stream.char()
        if data == "-":
            self.state = self.commentEndState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"] += "-\uFFFD"
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "incorrect-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["data"] += "-" + data
            self.state = self.commentState
        return True
    def commentState(self):
        """Accumulate comment text until a '-' (possible end) or EOF.

        Always returns True.
        """
        data = self.stream.char()
        if data == "-":
            self.state = self.commentEndDashState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"] += "\uFFFD"
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "eof-in-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            # Bulk-append everything up to the next '-' or NUL.
            self.currentToken["data"] += data + \
                self.stream.charsUntil(("-", "\u0000"))
        return True
    def commentEndDashState(self):
        """Handle the character after a single '-' inside a comment.

        A second '-' may end the comment; anything else flushes the pending
        '-' back into the data. Always returns True.
        """
        data = self.stream.char()
        if data == "-":
            self.state = self.commentEndState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"] += "-\uFFFD"
            self.state = self.commentState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-comment-end-dash"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["data"] += "-" + data
            self.state = self.commentState
        return True
    def commentEndState(self):
        """Handle the character after "--" inside a comment.

        '>' emits the comment; other characters are parse errors that either
        extend the dash run, flush "--" back into the data, or divert to the
        end-bang state. Always returns True.
        """
        data = self.stream.char()
        if data == ">":
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"] += "--\uFFFD"
            self.state = self.commentState
        elif data == "!":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-bang-after-double-dash-in-comment"})
            self.state = self.commentEndBangState
        elif data == "-":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-dash-after-double-dash-in-comment"})
            self.currentToken["data"] += data
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-comment-double-dash"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            # XXX
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-comment"})
            self.currentToken["data"] += "--" + data
            self.state = self.commentState
        return True
    def commentEndBangState(self):
        """Handle the character after "--!" inside a comment.

        '>' closes the comment; otherwise the pending "--!" is flushed
        into the comment data. Always returns True.
        """
        data = self.stream.char()
        if data == ">":
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data == "-":
            self.currentToken["data"] += "--!"
            self.state = self.commentEndDashState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"] += "--!\uFFFD"
            self.state = self.commentState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-comment-end-bang-state"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["data"] += "--!" + data
            self.state = self.commentState
        return True
    def doctypeState(self):
        """Handle the character right after "<!DOCTYPE".

        Whitespace is required before the name; anything else is a parse
        error and is reconsumed. Always returns True.
        """
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.beforeDoctypeNameState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-doctype-name-but-got-eof"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "need-space-after-doctype"})
            self.stream.unget(data)
            self.state = self.beforeDoctypeNameState
        return True
    def beforeDoctypeNameState(self):
        """Skip whitespace and begin collecting the DOCTYPE name.

        Always returns True.
        """
        data = self.stream.char()
        if data in spaceCharacters:
            pass
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-doctype-name-but-got-right-bracket"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["name"] = "\uFFFD"
            self.state = self.doctypeNameState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-doctype-name-but-got-eof"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["name"] = data
            self.state = self.doctypeNameState
        return True
    def doctypeNameState(self):
        """Accumulate the DOCTYPE name, lowercasing it when it ends.

        Always returns True.
        """
        data = self.stream.char()
        if data in spaceCharacters:
            self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
            self.state = self.afterDoctypeNameState
        elif data == ">":
            self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["name"] += "\uFFFD"
            self.state = self.doctypeNameState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype-name"})
            self.currentToken["correct"] = False
            self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["name"] += data
        return True
    def afterDoctypeNameState(self):
        """Look for the PUBLIC or SYSTEM keyword after the DOCTYPE name.

        Matches the keywords case-insensitively character by character; a
        mismatch marks the doctype incorrect and falls into the bogus
        doctype state. Always returns True.
        """
        data = self.stream.char()
        if data in spaceCharacters:
            pass
        elif data == ">":
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.currentToken["correct"] = False
            self.stream.unget(data)
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            if data in ("p", "P"):
                # Try to match the rest of "PUBLIC".
                matched = True
                for expected in (("u", "U"), ("b", "B"), ("l", "L"),
                                 ("i", "I"), ("c", "C")):
                    data = self.stream.char()
                    if data not in expected:
                        matched = False
                        break
                if matched:
                    self.state = self.afterDoctypePublicKeywordState
                    return True
            elif data in ("s", "S"):
                # Try to match the rest of "SYSTEM".
                matched = True
                for expected in (("y", "Y"), ("s", "S"), ("t", "T"),
                                 ("e", "E"), ("m", "M")):
                    data = self.stream.char()
                    if data not in expected:
                        matched = False
                        break
                if matched:
                    self.state = self.afterDoctypeSystemKeywordState
                    return True
            # All the characters read before the current 'data' will be
            # [a-zA-Z], so they're garbage in the bogus doctype and can be
            # discarded; only the latest character might be '>' or EOF
            # and needs to be ungetted
            self.stream.unget(data)
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-space-or-right-bracket-in-doctype", "datavars":
                                    {"data": data}})
            self.currentToken["correct"] = False
            self.state = self.bogusDoctypeState
        return True
    def afterDoctypePublicKeywordState(self):
        """Handle the character following the PUBLIC keyword.

        Quotes are parse errors here but are reconsumed so the identifier
        can still be collected. Always returns True.
        """
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.beforeDoctypePublicIdentifierState
        elif data in ("'", '"'):
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-doctype"})
            self.stream.unget(data)
            self.state = self.beforeDoctypePublicIdentifierState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.stream.unget(data)
            self.state = self.beforeDoctypePublicIdentifierState
        return True
    def beforeDoctypePublicIdentifierState(self):
        """Expect the opening quote of the DOCTYPE public identifier.

        Always returns True.
        """
        data = self.stream.char()
        if data in spaceCharacters:
            pass
        elif data == "\"":
            self.currentToken["publicId"] = ""
            self.state = self.doctypePublicIdentifierDoubleQuotedState
        elif data == "'":
            self.currentToken["publicId"] = ""
            self.state = self.doctypePublicIdentifierSingleQuotedState
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-end-of-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-doctype"})
            self.currentToken["correct"] = False
            self.state = self.bogusDoctypeState
        return True
    def doctypePublicIdentifierDoubleQuotedState(self):
        """Accumulate a double-quoted DOCTYPE public identifier.

        Always returns True.
        """
        data = self.stream.char()
        if data == "\"":
            self.state = self.afterDoctypePublicIdentifierState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["publicId"] += "\uFFFD"
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-end-of-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["publicId"] += data
        return True
    def doctypePublicIdentifierSingleQuotedState(self):
        """Accumulate a single-quoted DOCTYPE public identifier.

        Always returns True.
        """
        data = self.stream.char()
        if data == "'":
            self.state = self.afterDoctypePublicIdentifierState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["publicId"] += "\uFFFD"
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-end-of-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["publicId"] += data
        return True
    def afterDoctypePublicIdentifierState(self):
        """Handle the character after the closing public-identifier quote.

        A quote immediately starting the system identifier is a parse error
        but still accepted. Always returns True.
        """
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.betweenDoctypePublicAndSystemIdentifiersState
        elif data == ">":
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data == '"':
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-doctype"})
            self.currentToken["systemId"] = ""
            self.state = self.doctypeSystemIdentifierDoubleQuotedState
        elif data == "'":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-doctype"})
            self.currentToken["systemId"] = ""
            self.state = self.doctypeSystemIdentifierSingleQuotedState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-doctype"})
            self.currentToken["correct"] = False
            self.state = self.bogusDoctypeState
        return True
def betweenDoctypePublicAndSystemIdentifiersState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == '"':
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
    def afterDoctypeSystemKeywordState(self):
        """Handle the character following the SYSTEM keyword.

        Quotes here are parse errors but are reconsumed so the identifier
        is still collected. Always returns True.
        """
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.beforeDoctypeSystemIdentifierState
        elif data in ("'", '"'):
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-doctype"})
            self.stream.unget(data)
            self.state = self.beforeDoctypeSystemIdentifierState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.stream.unget(data)
            self.state = self.beforeDoctypeSystemIdentifierState
        return True
    def beforeDoctypeSystemIdentifierState(self):
        """Expect the opening quote of the DOCTYPE system identifier.

        Always returns True.
        """
        data = self.stream.char()
        if data in spaceCharacters:
            pass
        elif data == "\"":
            self.currentToken["systemId"] = ""
            self.state = self.doctypeSystemIdentifierDoubleQuotedState
        elif data == "'":
            self.currentToken["systemId"] = ""
            self.state = self.doctypeSystemIdentifierSingleQuotedState
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-doctype"})
            self.currentToken["correct"] = False
            self.state = self.bogusDoctypeState
        return True
    def doctypeSystemIdentifierDoubleQuotedState(self):
        """Accumulate a double-quoted DOCTYPE system identifier.

        Always returns True.
        """
        data = self.stream.char()
        if data == "\"":
            self.state = self.afterDoctypeSystemIdentifierState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["systemId"] += "\uFFFD"
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-end-of-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["systemId"] += data
        return True
    def doctypeSystemIdentifierSingleQuotedState(self):
        """Accumulate a single-quoted DOCTYPE system identifier.

        Always returns True.
        """
        data = self.stream.char()
        if data == "'":
            self.state = self.afterDoctypeSystemIdentifierState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["systemId"] += "\uFFFD"
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-end-of-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["systemId"] += data
        return True
    def afterDoctypeSystemIdentifierState(self):
        """Handle the character after the closing system-identifier quote.

        Note: unlike most error paths, the bogus-doctype fallthrough here
        does not set ``correct = False``. Always returns True.
        """
        data = self.stream.char()
        if data in spaceCharacters:
            pass
        elif data == ">":
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-doctype"})
            self.state = self.bogusDoctypeState
        return True
    def bogusDoctypeState(self):
        """Discard characters until '>' or EOF, then emit the doctype.

        Always returns True.
        """
        data = self.stream.char()
        if data == ">":
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            # XXX EMIT
            self.stream.unget(data)
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            pass
        return True
def cdataSectionState(self):
data = []
while True:
data.append(self.stream.charsUntil("]"))
data.append(self.stream.charsUntil(">"))
char = self.stream.char()
if char == EOF:
break
else:
assert char == ">"
if data[-1][-2:] == "]]":
data[-1] = data[-1][:-2]
break
else:
data.append(char)
data = "".join(data)
# Deal with null here rather than in the parser
nullCount = data.count("\u0000")
if nullCount > 0:
for i in range(nullCount):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
data = data.replace("\u0000", "\uFFFD")
if data:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": data})
self.state = self.dataState
return True
| gpl-2.0 |
abomyi/django | tests/lookup/tests.py | 20 | 37757 | from __future__ import unicode_literals
import collections
from datetime import datetime
from operator import attrgetter
from unittest import skipUnless
from django.core.exceptions import FieldError
from django.db import connection
from django.test import TestCase, TransactionTestCase, skipUnlessDBFeature
from .models import Article, Author, Game, MyISAMArticle, Player, Season, Tag
class LookupTests(TestCase):
    def setUp(self):
        """Create the shared fixture: 2 Authors, 7 Articles, 3 Tags.

        Articles a1-a4 belong to au1, a5-a7 to au2; each tag covers three
        overlapping articles (a3 and a5 each carry two tags).
        """
        # Create a few Authors.
        self.au1 = Author(name='Author 1')
        self.au1.save()
        self.au2 = Author(name='Author 2')
        self.au2.save()
        # Create a couple of Articles.
        self.a1 = Article(headline='Article 1', pub_date=datetime(2005, 7, 26), author=self.au1)
        self.a1.save()
        self.a2 = Article(headline='Article 2', pub_date=datetime(2005, 7, 27), author=self.au1)
        self.a2.save()
        self.a3 = Article(headline='Article 3', pub_date=datetime(2005, 7, 27), author=self.au1)
        self.a3.save()
        self.a4 = Article(headline='Article 4', pub_date=datetime(2005, 7, 28), author=self.au1)
        self.a4.save()
        self.a5 = Article(headline='Article 5', pub_date=datetime(2005, 8, 1, 9, 0), author=self.au2)
        self.a5.save()
        self.a6 = Article(headline='Article 6', pub_date=datetime(2005, 8, 1, 8, 0), author=self.au2)
        self.a6.save()
        self.a7 = Article(headline='Article 7', pub_date=datetime(2005, 7, 27), author=self.au2)
        self.a7.save()
        # Create a few Tags.
        self.t1 = Tag(name='Tag 1')
        self.t1.save()
        self.t1.articles.add(self.a1, self.a2, self.a3)
        self.t2 = Tag(name='Tag 2')
        self.t2.save()
        self.t2.articles.add(self.a3, self.a4, self.a5)
        self.t3 = Tag(name='Tag 3')
        self.t3.save()
        self.t3.articles.add(self.a5, self.a6, self.a7)
def test_exists(self):
# We can use .exists() to check that there are some
self.assertTrue(Article.objects.exists())
for a in Article.objects.all():
a.delete()
# There should be none now!
self.assertFalse(Article.objects.exists())
def test_lookup_int_as_str(self):
# Integer value can be queried using string
self.assertQuerysetEqual(Article.objects.filter(id__iexact=str(self.a1.id)),
['<Article: Article 1>'])
    @skipUnlessDBFeature('supports_date_lookup_using_string')
    def test_lookup_date_as_str(self):
        """A date lookup can be performed with a string prefix ('2005')."""
        # A date lookup can be performed using a string search
        self.assertQuerysetEqual(Article.objects.filter(pub_date__startswith='2005'),
                                 [
                                     '<Article: Article 5>',
                                     '<Article: Article 6>',
                                     '<Article: Article 4>',
                                     '<Article: Article 2>',
                                     '<Article: Article 3>',
                                     '<Article: Article 7>',
                                     '<Article: Article 1>',
                                 ])
    def test_iterator(self):
        """iterator() returns a lazy generator usable on any QuerySet."""
        # Each QuerySet gets iterator(), which is a generator that "lazily"
        # returns results using database-level iteration.
        self.assertIsInstance(Article.objects.iterator(), collections.Iterator)
        self.assertQuerysetEqual(Article.objects.iterator(),
                                 [
                                     'Article 5',
                                     'Article 6',
                                     'Article 4',
                                     'Article 2',
                                     'Article 3',
                                     'Article 7',
                                     'Article 1',
                                 ],
                                 transform=attrgetter('headline'))
        # iterator() can be used on any QuerySet.
        self.assertQuerysetEqual(
            Article.objects.filter(headline__endswith='4').iterator(),
            ['Article 4'],
            transform=attrgetter('headline'))
def test_count(self):
# count() returns the number of objects matching search criteria.
self.assertEqual(Article.objects.count(), 7)
self.assertEqual(Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).count(), 3)
self.assertEqual(Article.objects.filter(headline__startswith='Blah blah').count(), 0)
# count() should respect sliced query sets.
articles = Article.objects.all()
self.assertEqual(articles.count(), 7)
self.assertEqual(articles[:4].count(), 4)
self.assertEqual(articles[1:100].count(), 6)
self.assertEqual(articles[10:100].count(), 0)
# Date and date/time lookups can also be done with strings.
self.assertEqual(Article.objects.filter(pub_date__exact='2005-07-27 00:00:00').count(), 3)
    def test_in_bulk(self):
        """in_bulk() maps IDs to objects for lists, sets, tuples and iterators."""
        # in_bulk() takes a list of IDs and returns a dictionary mapping IDs to objects.
        arts = Article.objects.in_bulk([self.a1.id, self.a2.id])
        self.assertEqual(arts[self.a1.id], self.a1)
        self.assertEqual(arts[self.a2.id], self.a2)
        self.assertEqual(Article.objects.in_bulk([self.a3.id]), {self.a3.id: self.a3})
        self.assertEqual(Article.objects.in_bulk({self.a3.id}), {self.a3.id: self.a3})
        self.assertEqual(Article.objects.in_bulk(frozenset([self.a3.id])), {self.a3.id: self.a3})
        self.assertEqual(Article.objects.in_bulk((self.a3.id,)), {self.a3.id: self.a3})
        # Unknown IDs and empty inputs produce empty dicts.
        self.assertEqual(Article.objects.in_bulk([1000]), {})
        self.assertEqual(Article.objects.in_bulk([]), {})
        self.assertEqual(Article.objects.in_bulk(iter([self.a1.id])), {self.a1.id: self.a1})
        self.assertEqual(Article.objects.in_bulk(iter([])), {})
        # The id_list argument is required; keyword filters are rejected.
        self.assertRaises(TypeError, Article.objects.in_bulk)
        self.assertRaises(TypeError, Article.objects.in_bulk, headline__startswith='Blah')
def test_values(self):
# values() returns a list of dictionaries instead of object instances --
# and you can specify which fields you want to retrieve.
identity = lambda x: x
self.assertQuerysetEqual(Article.objects.values('headline'),
[
{'headline': 'Article 5'},
{'headline': 'Article 6'},
{'headline': 'Article 4'},
{'headline': 'Article 2'},
{'headline': 'Article 3'},
{'headline': 'Article 7'},
{'headline': 'Article 1'},
],
transform=identity)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).values('id'),
[{'id': self.a2.id}, {'id': self.a3.id}, {'id': self.a7.id}],
transform=identity)
self.assertQuerysetEqual(Article.objects.values('id', 'headline'),
[
{'id': self.a5.id, 'headline': 'Article 5'},
{'id': self.a6.id, 'headline': 'Article 6'},
{'id': self.a4.id, 'headline': 'Article 4'},
{'id': self.a2.id, 'headline': 'Article 2'},
{'id': self.a3.id, 'headline': 'Article 3'},
{'id': self.a7.id, 'headline': 'Article 7'},
{'id': self.a1.id, 'headline': 'Article 1'},
],
transform=identity)
# You can use values() with iterator() for memory savings,
# because iterator() uses database-level iteration.
self.assertQuerysetEqual(Article.objects.values('id', 'headline').iterator(),
[
{'headline': 'Article 5', 'id': self.a5.id},
{'headline': 'Article 6', 'id': self.a6.id},
{'headline': 'Article 4', 'id': self.a4.id},
{'headline': 'Article 2', 'id': self.a2.id},
{'headline': 'Article 3', 'id': self.a3.id},
{'headline': 'Article 7', 'id': self.a7.id},
{'headline': 'Article 1', 'id': self.a1.id},
],
transform=identity)
# The values() method works with "extra" fields specified in extra(select).
self.assertQuerysetEqual(
Article.objects.extra(select={'id_plus_one': 'id + 1'}).values('id', 'id_plus_one'),
[
{'id': self.a5.id, 'id_plus_one': self.a5.id + 1},
{'id': self.a6.id, 'id_plus_one': self.a6.id + 1},
{'id': self.a4.id, 'id_plus_one': self.a4.id + 1},
{'id': self.a2.id, 'id_plus_one': self.a2.id + 1},
{'id': self.a3.id, 'id_plus_one': self.a3.id + 1},
{'id': self.a7.id, 'id_plus_one': self.a7.id + 1},
{'id': self.a1.id, 'id_plus_one': self.a1.id + 1},
],
transform=identity)
data = {
'id_plus_one': 'id+1',
'id_plus_two': 'id+2',
'id_plus_three': 'id+3',
'id_plus_four': 'id+4',
'id_plus_five': 'id+5',
'id_plus_six': 'id+6',
'id_plus_seven': 'id+7',
'id_plus_eight': 'id+8',
}
self.assertQuerysetEqual(
Article.objects.filter(id=self.a1.id).extra(select=data).values(*data.keys()),
[{
'id_plus_one': self.a1.id + 1,
'id_plus_two': self.a1.id + 2,
'id_plus_three': self.a1.id + 3,
'id_plus_four': self.a1.id + 4,
'id_plus_five': self.a1.id + 5,
'id_plus_six': self.a1.id + 6,
'id_plus_seven': self.a1.id + 7,
'id_plus_eight': self.a1.id + 8,
}], transform=identity)
# You can specify fields from forward and reverse relations, just like filter().
self.assertQuerysetEqual(
Article.objects.values('headline', 'author__name'),
[
{'headline': self.a5.headline, 'author__name': self.au2.name},
{'headline': self.a6.headline, 'author__name': self.au2.name},
{'headline': self.a4.headline, 'author__name': self.au1.name},
{'headline': self.a2.headline, 'author__name': self.au1.name},
{'headline': self.a3.headline, 'author__name': self.au1.name},
{'headline': self.a7.headline, 'author__name': self.au2.name},
{'headline': self.a1.headline, 'author__name': self.au1.name},
], transform=identity)
self.assertQuerysetEqual(
Author.objects.values('name', 'article__headline').order_by('name', 'article__headline'),
[
{'name': self.au1.name, 'article__headline': self.a1.headline},
{'name': self.au1.name, 'article__headline': self.a2.headline},
{'name': self.au1.name, 'article__headline': self.a3.headline},
{'name': self.au1.name, 'article__headline': self.a4.headline},
{'name': self.au2.name, 'article__headline': self.a5.headline},
{'name': self.au2.name, 'article__headline': self.a6.headline},
{'name': self.au2.name, 'article__headline': self.a7.headline},
], transform=identity)
self.assertQuerysetEqual(
(
Author.objects
.values('name', 'article__headline', 'article__tag__name')
.order_by('name', 'article__headline', 'article__tag__name')
),
[
{'name': self.au1.name, 'article__headline': self.a1.headline, 'article__tag__name': self.t1.name},
{'name': self.au1.name, 'article__headline': self.a2.headline, 'article__tag__name': self.t1.name},
{'name': self.au1.name, 'article__headline': self.a3.headline, 'article__tag__name': self.t1.name},
{'name': self.au1.name, 'article__headline': self.a3.headline, 'article__tag__name': self.t2.name},
{'name': self.au1.name, 'article__headline': self.a4.headline, 'article__tag__name': self.t2.name},
{'name': self.au2.name, 'article__headline': self.a5.headline, 'article__tag__name': self.t2.name},
{'name': self.au2.name, 'article__headline': self.a5.headline, 'article__tag__name': self.t3.name},
{'name': self.au2.name, 'article__headline': self.a6.headline, 'article__tag__name': self.t3.name},
{'name': self.au2.name, 'article__headline': self.a7.headline, 'article__tag__name': self.t3.name},
], transform=identity)
# However, an exception FieldDoesNotExist will be thrown if you specify
# a non-existent field name in values() (a field that is neither in the
# model nor in extra(select)).
self.assertRaises(FieldError,
Article.objects.extra(select={'id_plus_one': 'id + 1'}).values,
'id', 'id_plus_two')
# If you don't specify field names to values(), all are returned.
self.assertQuerysetEqual(Article.objects.filter(id=self.a5.id).values(),
[{
'id': self.a5.id,
'author_id': self.au2.id,
'headline': 'Article 5',
'pub_date': datetime(2005, 8, 1, 9, 0)
}], transform=identity)
    def test_values_list(self):
        """values_list() returns rows as tuples ordered like the requested
        fields; flat=True unwraps single-field rows into bare values."""
        # values_list() is similar to values(), except that the results are
        # returned as a list of tuples, rather than a list of dictionaries.
        # Within each tuple, the order of the elements is the same as the order
        # of fields in the values_list() call.
        identity = lambda x: x  # compare raw tuples/values, not repr()
        self.assertQuerysetEqual(Article.objects.values_list('headline'),
            [
                ('Article 5',),
                ('Article 6',),
                ('Article 4',),
                ('Article 2',),
                ('Article 3',),
                ('Article 7',),
                ('Article 1',),
            ], transform=identity)
        self.assertQuerysetEqual(Article.objects.values_list('id').order_by('id'),
            [(self.a1.id,), (self.a2.id,), (self.a3.id,), (self.a4.id,), (self.a5.id,), (self.a6.id,), (self.a7.id,)],
            transform=identity)
        # flat=True turns one-tuples into scalars.
        self.assertQuerysetEqual(
            Article.objects.values_list('id', flat=True).order_by('id'),
            [self.a1.id, self.a2.id, self.a3.id, self.a4.id, self.a5.id, self.a6.id, self.a7.id],
            transform=identity)
        # extra(select=...) columns are available to values_list() as well.
        self.assertQuerysetEqual(
            Article.objects.extra(select={'id_plus_one': 'id+1'})
                .order_by('id').values_list('id'),
            [(self.a1.id,), (self.a2.id,), (self.a3.id,), (self.a4.id,), (self.a5.id,), (self.a6.id,), (self.a7.id,)],
            transform=identity)
        self.assertQuerysetEqual(
            Article.objects.extra(select={'id_plus_one': 'id+1'})
                .order_by('id').values_list('id_plus_one', 'id'),
            [
                (self.a1.id + 1, self.a1.id),
                (self.a2.id + 1, self.a2.id),
                (self.a3.id + 1, self.a3.id),
                (self.a4.id + 1, self.a4.id),
                (self.a5.id + 1, self.a5.id),
                (self.a6.id + 1, self.a6.id),
                (self.a7.id + 1, self.a7.id)
            ],
            transform=identity)
        # Swapping the requested fields swaps the tuple element order too.
        self.assertQuerysetEqual(
            Article.objects.extra(select={'id_plus_one': 'id+1'})
                .order_by('id').values_list('id', 'id_plus_one'),
            [
                (self.a1.id, self.a1.id + 1),
                (self.a2.id, self.a2.id + 1),
                (self.a3.id, self.a3.id + 1),
                (self.a4.id, self.a4.id + 1),
                (self.a5.id, self.a5.id + 1),
                (self.a6.id, self.a6.id + 1),
                (self.a7.id, self.a7.id + 1)
            ],
            transform=identity)
        # Multi-valued relation traversal yields one tuple per combination.
        self.assertQuerysetEqual(
            (
                Author.objects
                    .values_list('name', 'article__headline', 'article__tag__name')
                    .order_by('name', 'article__headline', 'article__tag__name')
            ),
            [
                (self.au1.name, self.a1.headline, self.t1.name),
                (self.au1.name, self.a2.headline, self.t1.name),
                (self.au1.name, self.a3.headline, self.t1.name),
                (self.au1.name, self.a3.headline, self.t2.name),
                (self.au1.name, self.a4.headline, self.t2.name),
                (self.au2.name, self.a5.headline, self.t2.name),
                (self.au2.name, self.a5.headline, self.t3.name),
                (self.au2.name, self.a6.headline, self.t3.name),
                (self.au2.name, self.a7.headline, self.t3.name),
            ], transform=identity)
        # flat=True is only valid when a single field is requested.
        self.assertRaises(TypeError, Article.objects.values_list, 'id', 'headline', flat=True)
    def test_get_next_previous_by(self):
        """get_next_by_FOO()/get_previous_by_FOO() navigate by a date field,
        falling back to the primary key to break ties deterministically."""
        # Every DateField and DateTimeField creates get_next_by_FOO() and
        # get_previous_by_FOO() methods. In the case of identical date values,
        # these methods will use the ID as a fallback check. This guarantees
        # that no records are skipped or duplicated.
        self.assertEqual(repr(self.a1.get_next_by_pub_date()),
            '<Article: Article 2>')
        self.assertEqual(repr(self.a2.get_next_by_pub_date()),
            '<Article: Article 3>')
        # Extra lookup kwargs narrow the candidate set.
        self.assertEqual(repr(self.a2.get_next_by_pub_date(headline__endswith='6')),
            '<Article: Article 6>')
        self.assertEqual(repr(self.a3.get_next_by_pub_date()),
            '<Article: Article 7>')
        self.assertEqual(repr(self.a4.get_next_by_pub_date()),
            '<Article: Article 6>')
        # The latest article has no successor.
        self.assertRaises(Article.DoesNotExist, self.a5.get_next_by_pub_date)
        self.assertEqual(repr(self.a6.get_next_by_pub_date()),
            '<Article: Article 5>')
        self.assertEqual(repr(self.a7.get_next_by_pub_date()),
            '<Article: Article 4>')
        self.assertEqual(repr(self.a7.get_previous_by_pub_date()),
            '<Article: Article 3>')
        self.assertEqual(repr(self.a6.get_previous_by_pub_date()),
            '<Article: Article 4>')
        self.assertEqual(repr(self.a5.get_previous_by_pub_date()),
            '<Article: Article 6>')
        self.assertEqual(repr(self.a4.get_previous_by_pub_date()),
            '<Article: Article 7>')
        self.assertEqual(repr(self.a3.get_previous_by_pub_date()),
            '<Article: Article 2>')
        self.assertEqual(repr(self.a2.get_previous_by_pub_date()),
            '<Article: Article 1>')
def test_escaping(self):
# Underscores, percent signs and backslashes have special meaning in the
# underlying SQL code, but Django handles the quoting of them automatically.
a8 = Article(headline='Article_ with underscore', pub_date=datetime(2005, 11, 20))
a8.save()
self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article'),
[
'<Article: Article_ with underscore>',
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
])
self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article_'),
['<Article: Article_ with underscore>'])
a9 = Article(headline='Article% with percent sign', pub_date=datetime(2005, 11, 21))
a9.save()
self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article'),
[
'<Article: Article% with percent sign>',
'<Article: Article_ with underscore>',
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
])
self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article%'),
['<Article: Article% with percent sign>'])
a10 = Article(headline='Article with \\ backslash', pub_date=datetime(2005, 11, 22))
a10.save()
self.assertQuerysetEqual(Article.objects.filter(headline__contains='\\'),
['<Article: Article with \ backslash>'])
    def test_exclude(self):
        """exclude() is the logical negation of filter(), including for
        lookups containing SQL wildcard characters."""
        Article.objects.create(headline='Article_ with underscore', pub_date=datetime(2005, 11, 20))
        Article.objects.create(headline='Article% with percent sign', pub_date=datetime(2005, 11, 21))
        Article.objects.create(headline='Article with \\ backslash', pub_date=datetime(2005, 11, 22))
        # exclude() is the opposite of filter() when doing lookups:
        self.assertQuerysetEqual(
            Article.objects.filter(headline__contains='Article').exclude(headline__contains='with'),
            [
                '<Article: Article 5>',
                '<Article: Article 6>',
                '<Article: Article 4>',
                '<Article: Article 2>',
                '<Article: Article 3>',
                '<Article: Article 7>',
                '<Article: Article 1>',
            ])
        # The excluded underscore is matched literally, so only the one row
        # actually starting with 'Article_' is removed.
        self.assertQuerysetEqual(Article.objects.exclude(headline__startswith="Article_"),
            [
                '<Article: Article with \\ backslash>',
                '<Article: Article% with percent sign>',
                '<Article: Article 5>',
                '<Article: Article 6>',
                '<Article: Article 4>',
                '<Article: Article 2>',
                '<Article: Article 3>',
                '<Article: Article 7>',
                '<Article: Article 1>',
            ])
        self.assertQuerysetEqual(Article.objects.exclude(headline="Article 7"),
            [
                '<Article: Article with \\ backslash>',
                '<Article: Article% with percent sign>',
                '<Article: Article_ with underscore>',
                '<Article: Article 5>',
                '<Article: Article 6>',
                '<Article: Article 4>',
                '<Article: Article 2>',
                '<Article: Article 3>',
                '<Article: Article 1>',
            ])
def test_none(self):
# none() returns a QuerySet that behaves like any other QuerySet object
self.assertQuerysetEqual(Article.objects.none(), [])
self.assertQuerysetEqual(
Article.objects.none().filter(headline__startswith='Article'), [])
self.assertQuerysetEqual(
Article.objects.filter(headline__startswith='Article').none(), [])
self.assertEqual(Article.objects.none().count(), 0)
self.assertEqual(
Article.objects.none().update(headline="This should not take effect"), 0)
self.assertQuerysetEqual(
[article for article in Article.objects.none().iterator()],
[])
    def test_in(self):
        """__in with an empty list matches nothing; excluding it matches
        everything."""
        # using __in with an empty list should return an empty query set
        self.assertQuerysetEqual(Article.objects.filter(id__in=[]), [])
        self.assertQuerysetEqual(Article.objects.exclude(id__in=[]),
            [
                '<Article: Article 5>',
                '<Article: Article 6>',
                '<Article: Article 4>',
                '<Article: Article 2>',
                '<Article: Article 3>',
                '<Article: Article 7>',
                '<Article: Article 1>',
            ])
def test_error_messages(self):
# Programming errors are pointed out with nice error messages
try:
Article.objects.filter(pub_date_year='2005').count()
self.fail('FieldError not raised')
except FieldError as ex:
self.assertEqual(str(ex), "Cannot resolve keyword 'pub_date_year' "
"into field. Choices are: author, author_id, headline, "
"id, pub_date, tag")
try:
Article.objects.filter(headline__starts='Article')
self.fail('FieldError not raised')
except FieldError as ex:
self.assertEqual(
str(ex), "Unsupported lookup 'starts' for CharField "
"or join on the field not permitted.")
def test_relation_nested_lookup_error(self):
# An invalid nested lookup on a related field raises a useful error.
msg = 'Related Field got invalid lookup: editor'
with self.assertRaisesMessage(FieldError, msg):
Article.objects.filter(author__editor__name='James')
    def test_regex(self):
        """__regex / __iregex lookups honour the usual regex constructs:
        quantifiers, wildcards, anchors, character sets and alternation."""
        # Create some articles with a bit more interesting headlines for testing field lookups:
        for a in Article.objects.all():
            a.delete()
        now = datetime.now()
        a1 = Article(pub_date=now, headline='f')
        a1.save()
        a2 = Article(pub_date=now, headline='fo')
        a2.save()
        a3 = Article(pub_date=now, headline='foo')
        a3.save()
        a4 = Article(pub_date=now, headline='fooo')
        a4.save()
        a5 = Article(pub_date=now, headline='hey-Foo')
        a5.save()
        a6 = Article(pub_date=now, headline='bar')
        a6.save()
        a7 = Article(pub_date=now, headline='AbBa')
        a7.save()
        a8 = Article(pub_date=now, headline='baz')
        a8.save()
        a9 = Article(pub_date=now, headline='baxZ')
        a9.save()
        # zero-or-more
        self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'fo*'),
            ['<Article: f>', '<Article: fo>', '<Article: foo>', '<Article: fooo>'])
        # iregex also picks up the mixed-case 'hey-Foo'.
        self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'fo*'),
            [
                '<Article: f>',
                '<Article: fo>',
                '<Article: foo>',
                '<Article: fooo>',
                '<Article: hey-Foo>',
            ])
        # one-or-more
        self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'fo+'),
            ['<Article: fo>', '<Article: foo>', '<Article: fooo>'])
        # wildcard
        self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'fooo?'),
            ['<Article: foo>', '<Article: fooo>'])
        # leading anchor
        self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'^b'),
            ['<Article: bar>', '<Article: baxZ>', '<Article: baz>'])
        self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'^a'),
            ['<Article: AbBa>'])
        # trailing anchor
        self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'z$'),
            ['<Article: baz>'])
        self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'z$'),
            ['<Article: baxZ>', '<Article: baz>'])
        # character sets
        self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'ba[rz]'),
            ['<Article: bar>', '<Article: baz>'])
        self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'ba.[RxZ]'),
            ['<Article: baxZ>'])
        self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'ba[RxZ]'),
            ['<Article: bar>', '<Article: baxZ>', '<Article: baz>'])
        # and more articles:
        a10 = Article(pub_date=now, headline='foobar')
        a10.save()
        a11 = Article(pub_date=now, headline='foobaz')
        a11.save()
        a12 = Article(pub_date=now, headline='ooF')
        a12.save()
        a13 = Article(pub_date=now, headline='foobarbaz')
        a13.save()
        a14 = Article(pub_date=now, headline='zoocarfaz')
        a14.save()
        a15 = Article(pub_date=now, headline='barfoobaz')
        a15.save()
        a16 = Article(pub_date=now, headline='bazbaRFOO')
        a16.save()
        # alternation
        self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'oo(f|b)'),
            [
                '<Article: barfoobaz>',
                '<Article: foobar>',
                '<Article: foobarbaz>',
                '<Article: foobaz>',
            ])
        self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'oo(f|b)'),
            [
                '<Article: barfoobaz>',
                '<Article: foobar>',
                '<Article: foobarbaz>',
                '<Article: foobaz>',
                '<Article: ooF>',
            ])
        self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'^foo(f|b)'),
            ['<Article: foobar>', '<Article: foobarbaz>', '<Article: foobaz>'])
        # greedy matching
        self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'b.*az'),
            [
                '<Article: barfoobaz>',
                '<Article: baz>',
                '<Article: bazbaRFOO>',
                '<Article: foobarbaz>',
                '<Article: foobaz>',
            ])
        self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'b.*ar'),
            [
                '<Article: bar>',
                '<Article: barfoobaz>',
                '<Article: bazbaRFOO>',
                '<Article: foobar>',
                '<Article: foobarbaz>',
            ])
    @skipUnlessDBFeature('supports_regex_backreferencing')
    def test_regex_backreferencing(self):
        """Grouping with backreferences (\\1) works in __regex lookups on
        database backends that support it."""
        # grouping and backreferences
        now = datetime.now()
        a10 = Article(pub_date=now, headline='foobar')
        a10.save()
        a11 = Article(pub_date=now, headline='foobaz')
        a11.save()
        a12 = Article(pub_date=now, headline='ooF')
        a12.save()
        a13 = Article(pub_date=now, headline='foobarbaz')
        a13.save()
        a14 = Article(pub_date=now, headline='zoocarfaz')
        a14.save()
        a15 = Article(pub_date=now, headline='barfoobaz')
        a15.save()
        a16 = Article(pub_date=now, headline='bazbaRFOO')
        a16.save()
        # \1 must repeat whatever single character the group captured.
        self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'b(.).*b\1'),
            ['<Article: barfoobaz>', '<Article: bazbaRFOO>', '<Article: foobarbaz>'])
    def test_regex_null(self):
        """
        Ensure that a regex lookup does not fail on null/None values
        """
        # NULL is simply not matched, rather than raising.
        Season.objects.create(year=2012, gt=None)
        self.assertQuerysetEqual(Season.objects.filter(gt__regex=r'^$'), [])
    def test_regex_non_string(self):
        """
        Ensure that a regex lookup does not fail on non-string fields
        """
        # The integer column value is coerced to text for the regex match.
        Season.objects.create(year=2013, gt=444)
        self.assertQuerysetEqual(Season.objects.filter(gt__regex=r'^444$'),
            ['<Season: 2013>'])
    def test_regex_non_ascii(self):
        """
        Ensure that a regex lookup does not trip on non-ASCII characters.
        """
        # U+2660 (black spade suit) round-trips through the regex lookup.
        Player.objects.create(name='\u2660')
        Player.objects.get(name__regex='\u2660')
    def test_nonfield_lookups(self):
        """
        Ensure that a lookup query containing non-fields raises the proper
        exception.
        """
        # Unknown trailing lookup on a real field.
        with self.assertRaises(FieldError):
            Article.objects.filter(headline__blahblah=99)
        # Unknown intermediate lookup followed by a valid one.
        with self.assertRaises(FieldError):
            Article.objects.filter(headline__blahblah__exact=99)
        # Entirely unknown field name.
        with self.assertRaises(FieldError):
            Article.objects.filter(blahblah=99)
    def test_lookup_collision(self):
        """
        Ensure that genuine field names don't collide with built-in lookup
        types ('year', 'gt', 'range', 'in' etc.).
        Refs #11670.
        """
        # Here we're using 'gt' as a code number for the year, e.g. 111=>2009.
        season_2009 = Season.objects.create(year=2009, gt=111)
        season_2009.games.create(home="Houston Astros", away="St. Louis Cardinals")
        season_2010 = Season.objects.create(year=2010, gt=222)
        season_2010.games.create(home="Houston Astros", away="Chicago Cubs")
        season_2010.games.create(home="Houston Astros", away="Milwaukee Brewers")
        season_2010.games.create(home="Houston Astros", away="St. Louis Cardinals")
        season_2011 = Season.objects.create(year=2011, gt=333)
        season_2011.games.create(home="Houston Astros", away="St. Louis Cardinals")
        season_2011.games.create(home="Houston Astros", away="Milwaukee Brewers")
        hunter_pence = Player.objects.create(name="Hunter Pence")
        hunter_pence.games.set(Game.objects.filter(season__year__in=[2009, 2010]))
        pudge = Player.objects.create(name="Ivan Rodriquez")
        pudge.games.set(Game.objects.filter(season__year=2009))
        pedro_feliz = Player.objects.create(name="Pedro Feliz")
        pedro_feliz.games.set(Game.objects.filter(season__year__in=[2011]))
        johnson = Player.objects.create(name="Johnson")
        johnson.games.set(Game.objects.filter(season__year__in=[2011]))
        # Games in 2010: 'season__gt' resolves to the *field* named gt, while
        # 'season__year__gt' uses gt as a comparison lookup.
        self.assertEqual(Game.objects.filter(season__year=2010).count(), 3)
        self.assertEqual(Game.objects.filter(season__year__exact=2010).count(), 3)
        self.assertEqual(Game.objects.filter(season__gt=222).count(), 3)
        self.assertEqual(Game.objects.filter(season__gt__exact=222).count(), 3)
        # Games in 2011
        self.assertEqual(Game.objects.filter(season__year=2011).count(), 2)
        self.assertEqual(Game.objects.filter(season__year__exact=2011).count(), 2)
        self.assertEqual(Game.objects.filter(season__gt=333).count(), 2)
        self.assertEqual(Game.objects.filter(season__gt__exact=333).count(), 2)
        self.assertEqual(Game.objects.filter(season__year__gt=2010).count(), 2)
        self.assertEqual(Game.objects.filter(season__gt__gt=222).count(), 2)
        # Games played in 2010 and 2011
        self.assertEqual(Game.objects.filter(season__year__in=[2010, 2011]).count(), 5)
        self.assertEqual(Game.objects.filter(season__year__gt=2009).count(), 5)
        self.assertEqual(Game.objects.filter(season__gt__in=[222, 333]).count(), 5)
        self.assertEqual(Game.objects.filter(season__gt__gt=111).count(), 5)
        # Players who played in 2009
        self.assertEqual(Player.objects.filter(games__season__year=2009).distinct().count(), 2)
        self.assertEqual(Player.objects.filter(games__season__year__exact=2009).distinct().count(), 2)
        self.assertEqual(Player.objects.filter(games__season__gt=111).distinct().count(), 2)
        self.assertEqual(Player.objects.filter(games__season__gt__exact=111).distinct().count(), 2)
        # Players who played in 2010
        self.assertEqual(Player.objects.filter(games__season__year=2010).distinct().count(), 1)
        self.assertEqual(Player.objects.filter(games__season__year__exact=2010).distinct().count(), 1)
        self.assertEqual(Player.objects.filter(games__season__gt=222).distinct().count(), 1)
        self.assertEqual(Player.objects.filter(games__season__gt__exact=222).distinct().count(), 1)
        # Players who played in 2011
        self.assertEqual(Player.objects.filter(games__season__year=2011).distinct().count(), 2)
        self.assertEqual(Player.objects.filter(games__season__year__exact=2011).distinct().count(), 2)
        self.assertEqual(Player.objects.filter(games__season__gt=333).distinct().count(), 2)
        self.assertEqual(Player.objects.filter(games__season__year__gt=2010).distinct().count(), 2)
        self.assertEqual(Player.objects.filter(games__season__gt__gt=222).distinct().count(), 2)
    def test_chain_date_time_lookups(self):
        """Datetime transforms (__month, __day, __hour, __minute) can be
        chained with comparison lookups (__gt, __gte, __lt, __lte)."""
        self.assertQuerysetEqual(
            Article.objects.filter(pub_date__month__gt=7),
            ['<Article: Article 5>', '<Article: Article 6>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Article.objects.filter(pub_date__day__gte=27),
            ['<Article: Article 2>', '<Article: Article 3>',
             '<Article: Article 4>', '<Article: Article 7>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Article.objects.filter(pub_date__hour__lt=8),
            ['<Article: Article 1>', '<Article: Article 2>',
             '<Article: Article 3>', '<Article: Article 4>',
             '<Article: Article 7>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Article.objects.filter(pub_date__minute__lte=0),
            ['<Article: Article 1>', '<Article: Article 2>',
             '<Article: Article 3>', '<Article: Article 4>',
             '<Article: Article 5>', '<Article: Article 6>',
             '<Article: Article 7>'],
            ordered=False
        )
class LookupTransactionTests(TransactionTestCase):
    """Lookup tests that need real transactions (MySQL fulltext search)."""
    available_apps = ['lookup']

    @skipUnless(connection.vendor == 'mysql', 'requires MySQL')
    def test_mysql_lookup_search(self):
        """The __search lookup works against a MySQL fulltext index."""
        # To use fulltext indexes on MySQL either version 5.6 is needed, or one must use
        # MyISAM tables. Neither of these combinations is currently available on CI, so
        # lets manually create a MyISAM table for Article model.
        with connection.cursor() as cursor:
            cursor.execute(
                "CREATE TEMPORARY TABLE myisam_article ("
                "    id INTEGER PRIMARY KEY AUTO_INCREMENT, "
                "    headline VARCHAR(100) NOT NULL "
                ") ENGINE MYISAM")
            dr = MyISAMArticle.objects.create(headline='Django Reinhardt')
            MyISAMArticle.objects.create(headline='Ringo Star')
            # NOTE: Needs to be created after the article has been saved.
            cursor.execute(
                'CREATE FULLTEXT INDEX myisam_article_ft ON myisam_article (headline)')
            self.assertQuerysetEqual(
                MyISAMArticle.objects.filter(headline__search='Reinhardt'),
                [dr], lambda x: x)
| bsd-3-clause |
bioinformatics-ua/catalogue | emif/population_characteristics/conf_aggregations.py | 2 | 9739 | # -*- coding: utf-8 -*-
# Copyright (C) 2014 Universidade de Aveiro, DETI/IEETA, Bioinformatics Group - http://bioinformatics.ua.pt/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from population_characteristics.charts.operations import *
from population_characteristics.charts.aggregation import *
class ConfAggregations(object):
    """Provides the default chart aggregation definitions used by the
    population-characteristics views.

    The settings are currently hard-coded in :meth:`get_main_settings`.
    Small builder helpers replace the heavy copy/paste of the original
    implementation while producing exactly the same objects, in the same
    order.
    """

    def __init__(self):
        # TODO: nothing to initialise yet; configuration is static.
        pass

    def read_settings_from_file(self):
        """Placeholder for loading the aggregation settings from a file."""
        # TODO: implement file-based configuration.
        pass

    @staticmethod
    def _make_filter(name, value):
        """Build a Filter whose key mirrors its name (the pattern used by
        every static filter in this configuration)."""
        flt = Filter()
        flt.name = name
        flt.key = name
        flt.value = value
        return flt

    @staticmethod
    def _make_field(ttype, name, key, value, exclusive=False):
        """Build an AggregationField.

        ``exclusive`` is only assigned when requested, so instances that
        never had the attribute set keep whatever default the class defines
        (matching the original code, which only set it on the Gender field).
        """
        fld = AggregationField()
        fld.ttype = ttype
        fld.name = name
        fld.key = key
        fld.value = value
        if exclusive:
            fld.exclusive = True
        return fld

    @staticmethod
    def _make_aggregation(var, static_filters, aggregation_fields):
        """Build a SUM-over-'Count' Aggregation — the only operation and
        computed field used by the default settings."""
        agg = Aggregation()
        agg.var = var
        agg.operation = Operation.SUM
        agg.field_to_compute = "Count"
        agg.static_filters = static_filters
        agg.aggregation_fields = aggregation_fields
        return agg

    def get_main_settings(self):
        """Return the default list of chart aggregations.

        List content and ordering are identical to the original
        hand-expanded version. A never-used "patient time per database by
        age group" aggregation (built but never appended in the original)
        and several constructed-but-unused AggregationField locals have
        been dropped as dead code.
        """
        mk_filter = self._make_filter
        mk_field = self._make_field
        mk_agg = self._make_aggregation

        # Field/filter builders. Fresh instances are returned on every call
        # so each aggregation owns its own objects, as in the original.
        def db_field():
            return mk_field("slug", "database_name_t", "dbname", "dbname_value")

        def location_field():
            return mk_field("slug", "location_t", "location", "location_value")

        def gender_field():
            return mk_field("tsv", None, None, 'Gender', exclusive=True)

        def year_field():
            return mk_field("tsv", 'YEAR', 'Name1', 'Value1')

        def age_field():
            return mk_field("tsv", 'AGE', 'Name2', 'Value2')

        def year_only_filters():
            return [mk_filter('Name1', 'YEAR')]

        def year_no_second_filters():
            # YEAR on the first name, with an explicitly empty second
            # name/value pair.
            return [mk_filter('Name1', 'YEAR'),
                    mk_filter('Name2', ''),
                    mk_filter('Value2', '')]

        return [
            # Overall patient time per database.
            mk_agg("Observation time in a year", year_no_second_filters(),
                   [db_field(), gender_field()]),
            # Overall patient time per location.
            mk_agg("Observation time in a year", year_no_second_filters(),
                   [location_field(), gender_field()]),
            # Active patients per database.
            mk_agg("Active patients", year_no_second_filters(),
                   [db_field(), gender_field()]),
            # Active patients per database, by age group. NOTE: the original
            # listed the AGE filter before the YEAR filter here (unlike the
            # other entries); that order is preserved.
            mk_agg("Observation time in a year",
                   [mk_filter('Name2', 'AGE'), mk_filter('Name1', 'YEAR')],
                   [db_field(), year_field(), gender_field(), age_field()]),
            # Birth year per database.
            mk_agg("Birth in year", year_only_filters(),
                   [db_field(), gender_field(), year_field()]),
            # Age at start of year, per database.
            mk_agg("Age at start of year",
                   [mk_filter('Name1', 'YEAR'), mk_filter('Name2', 'AGE')],
                   [db_field(), year_field(), gender_field(), age_field()]),
            # Age at patient start, per database.
            mk_agg("Age at patient start", year_no_second_filters(),
                   [db_field(), gender_field()]),
        ]
| gpl-3.0 |
mcgachey/edx-platform | common/djangoapps/geoinfo/tests/test_middleware.py | 137 | 5217 | """
Tests for CountryMiddleware.
"""
from mock import patch
import pygeoip
from django.contrib.sessions.middleware import SessionMiddleware
from django.test import TestCase
from django.test.client import RequestFactory
from geoinfo.middleware import CountryMiddleware
from student.tests.factories import UserFactory, AnonymousUserFactory
class CountryMiddlewareTests(TestCase):
    """
    Tests of CountryMiddleware.
    """
    def setUp(self):
        """Create middleware, users, and patch pygeoip with a fake IP map."""
        super(CountryMiddlewareTests, self).setUp()
        self.country_middleware = CountryMiddleware()
        self.session_middleware = SessionMiddleware()
        self.authenticated_user = UserFactory.create()
        self.anonymous_user = AnonymousUserFactory.create()
        self.request_factory = RequestFactory()
        self.patcher = patch.object(pygeoip.GeoIP, 'country_code_by_addr', self.mock_country_code_by_addr)
        self.patcher.start()
        self.addCleanup(self.patcher.stop)

    def mock_country_code_by_addr(self, ip_addr):
        """
        Gives us a fake set of IPs
        """
        # Unknown addresses fall back to 'US'.
        ip_dict = {
            '117.79.83.1': 'CN',
            '117.79.83.100': 'CN',
            '4.0.0.0': 'SD',
            '2001:da8:20f:1502:edcf:550b:4a9c:207d': 'CN',
        }
        return ip_dict.get(ip_addr, 'US')

    def test_country_code_added(self):
        """A first request stores the resolved country code and IP in the session."""
        request = self.request_factory.get(
            '/somewhere',
            HTTP_X_FORWARDED_FOR='117.79.83.1',
        )
        request.user = self.authenticated_user
        self.session_middleware.process_request(request)
        # No country code exists before request.
        self.assertNotIn('country_code', request.session)
        self.assertNotIn('ip_address', request.session)
        self.country_middleware.process_request(request)
        # Country code added to session.
        self.assertEqual('CN', request.session.get('country_code'))
        self.assertEqual('117.79.83.1', request.session.get('ip_address'))

    def test_ip_address_changed(self):
        """A request from a new IP updates both the country code and the IP."""
        request = self.request_factory.get(
            '/somewhere',
            HTTP_X_FORWARDED_FOR='4.0.0.0',
        )
        request.user = self.anonymous_user
        self.session_middleware.process_request(request)
        request.session['country_code'] = 'CN'
        request.session['ip_address'] = '117.79.83.1'
        self.country_middleware.process_request(request)
        # Country code is changed.
        self.assertEqual('SD', request.session.get('country_code'))
        self.assertEqual('4.0.0.0', request.session.get('ip_address'))

    def test_ip_address_is_not_changed(self):
        """A request from the same IP leaves the session values untouched."""
        request = self.request_factory.get(
            '/somewhere',
            HTTP_X_FORWARDED_FOR='117.79.83.1',
        )
        request.user = self.anonymous_user
        self.session_middleware.process_request(request)
        request.session['country_code'] = 'CN'
        request.session['ip_address'] = '117.79.83.1'
        self.country_middleware.process_request(request)
        # Country code is not changed.
        self.assertEqual('CN', request.session.get('country_code'))
        self.assertEqual('117.79.83.1', request.session.get('ip_address'))

    def test_same_country_different_ip(self):
        """A new IP from the same country updates the IP but keeps the country."""
        request = self.request_factory.get(
            '/somewhere',
            HTTP_X_FORWARDED_FOR='117.79.83.100',
        )
        request.user = self.anonymous_user
        self.session_middleware.process_request(request)
        request.session['country_code'] = 'CN'
        request.session['ip_address'] = '117.79.83.1'
        self.country_middleware.process_request(request)
        # Country code is not changed.
        self.assertEqual('CN', request.session.get('country_code'))
        self.assertEqual('117.79.83.100', request.session.get('ip_address'))

    def test_ip_address_is_none(self):
        """Without a client IP, any stale session values are removed."""
        # IP address is not defined in request.
        request = self.request_factory.get('/somewhere')
        request.user = self.anonymous_user
        # Run process_request to set up the session in the request
        # to be able to override it.
        self.session_middleware.process_request(request)
        request.session['country_code'] = 'CN'
        request.session['ip_address'] = '117.79.83.1'
        self.country_middleware.process_request(request)
        # No country code exists after request processing.
        self.assertNotIn('country_code', request.session)
        self.assertNotIn('ip_address', request.session)

    def test_ip_address_is_ipv6(self):
        """IPv6 client addresses are resolved and stored like IPv4 ones."""
        request = self.request_factory.get(
            '/somewhere',
            HTTP_X_FORWARDED_FOR='2001:da8:20f:1502:edcf:550b:4a9c:207d'
        )
        request.user = self.authenticated_user
        self.session_middleware.process_request(request)
        # No country code exists before request.
        self.assertNotIn('country_code', request.session)
        self.assertNotIn('ip_address', request.session)
        self.country_middleware.process_request(request)
        # Country code added to session.
        self.assertEqual('CN', request.session.get('country_code'))
        self.assertEqual(
            '2001:da8:20f:1502:edcf:550b:4a9c:207d', request.session.get('ip_address'))
| agpl-3.0 |
Vudentz/zephyr | scripts/west_commands/tests/test_stm32flash.py | 2 | 3707 | # Copyright (c) 2019 Thomas Kupper
#
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
import platform
from unittest.mock import patch, call
import pytest
from runners.stm32flash import Stm32flashBinaryRunner
from conftest import RC_KERNEL_BIN
# Fixed parameters fed to Stm32flashBinaryRunner in every test below.
TEST_CMD = 'stm32flash'              # executable the runner must invoke
TEST_DEVICE = '/dev/ttyUSB0'         # default serial device (Linux)
if platform.system() == 'Darwin':
    # macOS exposes the SiLabs USB-UART bridge under a different name.
    TEST_DEVICE = '/dev/tty.SLAB_USBtoUART'
TEST_BAUD = '115200'                 # serial baud rate
TEST_FORCE_BINARY = False            # don't force binary mode
TEST_ADDR = '0x08000000'             # flash base address
TEST_BIN_SIZE = '4095'               # fake size reported for the kernel image
TEST_EXEC_ADDR = '0'                 # execution start address for 'start'
TEST_SERIAL_MODE = '8e1'             # 8 data bits, even parity, 1 stop bit
TEST_RESET = False                   # no reset after operation
TEST_VERIFY = False                  # no write verification
# runner_config fixture (and always specifying all necessary
# parameters) means we don't get 100% coverage, but it's a
# starting out point.
EXPECTED_COMMANDS = {
'info':
([TEST_CMD,
'-b', TEST_BAUD,
'-m', TEST_SERIAL_MODE,
TEST_DEVICE],),
'erase':
([TEST_CMD,
'-b', TEST_BAUD,
'-m', TEST_SERIAL_MODE,
'-S', TEST_ADDR + ":" + str((int(TEST_BIN_SIZE) >> 12) + 1 << 12),
'-o', TEST_DEVICE],),
'start':
([TEST_CMD,
'-b', TEST_BAUD,
'-m', TEST_SERIAL_MODE,
'-g', TEST_EXEC_ADDR, TEST_DEVICE],),
'write':
([TEST_CMD,
'-b', TEST_BAUD,
'-m', TEST_SERIAL_MODE,
'-S', TEST_ADDR + ":" + TEST_BIN_SIZE,
'-w', RC_KERNEL_BIN,
TEST_DEVICE],),
}
def require_patch(program):
    """Patch for ZephyrBinaryRunner.require(): only the stm32flash tool
    is expected to be looked up by these tests."""
    assert program == TEST_CMD
def os_path_getsize_patch(filename):
    """Stand-in for os.path.getsize().

    Reports the fixture size for the kernel binary; for any other path it
    defers to the real filesystem check (mirroring the original helper,
    which returns os.path.isfile() for non-kernel paths).
    """
    return TEST_BIN_SIZE if filename == RC_KERNEL_BIN else os.path.isfile(filename)
def os_path_isfile_patch(filename):
    """Stand-in for os.path.isfile(): pretend the kernel binary exists,
    pass everything else through to the real check."""
    return filename == RC_KERNEL_BIN or os.path.isfile(filename)
@pytest.mark.parametrize('action', EXPECTED_COMMANDS)
@patch('runners.core.ZephyrBinaryRunner.require', side_effect=require_patch)
@patch('runners.core.ZephyrBinaryRunner.check_call')
def test_stm32flash_init(cc, req, action, runner_config):
    '''Test actions using a runner created by constructor.'''
    test_exec_addr = TEST_EXEC_ADDR
    # The 'write' action takes no execution address.
    if action == 'write':
        test_exec_addr = None
    runner = Stm32flashBinaryRunner(runner_config, device=TEST_DEVICE,
        action=action, baud=TEST_BAUD, force_binary=TEST_FORCE_BINARY,
        start_addr=TEST_ADDR, exec_addr=test_exec_addr,
        serial_mode=TEST_SERIAL_MODE, reset=TEST_RESET, verify=TEST_VERIFY)
    # Patch filesystem probes so the (nonexistent) kernel binary looks real.
    with patch('os.path.getsize', side_effect=os_path_getsize_patch):
        with patch('os.path.isfile', side_effect=os_path_isfile_patch):
            runner.run('flash')
    # The runner must have invoked exactly the expected subprocess command(s).
    assert cc.call_args_list == [call(x) for x in EXPECTED_COMMANDS[action]]
@pytest.mark.parametrize('action', EXPECTED_COMMANDS)
@patch('runners.core.ZephyrBinaryRunner.require', side_effect=require_patch)
@patch('runners.core.ZephyrBinaryRunner.check_call')
def test_stm32flash_create(cc, req, action, runner_config):
    '''Test actions using a runner created from action line parameters.'''
    # Only the 'start' action takes an execution address on the command line.
    if action == 'start':
        args = ['--action', action, '--baud-rate', TEST_BAUD, '--start-addr', TEST_ADDR,
                '--execution-addr', TEST_EXEC_ADDR]
    else:
        args = ['--action', action, '--baud-rate', TEST_BAUD, '--start-addr', TEST_ADDR]
    # Build the runner the same way 'west flash' would: through the
    # argparse-based factory rather than the constructor.
    parser = argparse.ArgumentParser()
    Stm32flashBinaryRunner.add_parser(parser)
    arg_namespace = parser.parse_args(args)
    runner = Stm32flashBinaryRunner.create(runner_config, arg_namespace)
    with patch('os.path.getsize', side_effect=os_path_getsize_patch):
        with patch('os.path.isfile', side_effect=os_path_isfile_patch):
            runner.run('flash')
    assert cc.call_args_list == [call(x) for x in EXPECTED_COMMANDS[action]]
| apache-2.0 |
googleapis/googleapis-gen | google/cloud/tasks/v2beta3/tasks-v2beta3-py/scripts/fixup_tasks_v2beta3_keywords.py | 2 | 6744 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
    predicate: Callable[[Any], bool],
    iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
    """Stable, out-of-place partition of *iterator* by *predicate*.

    Returns ``(matching, non_matching)``; each list preserves the input
    order of its elements.
    """
    matching: List[Any] = []
    non_matching: List[Any] = []
    for item in iterator:
        (matching if predicate(item) else non_matching).append(item)
    return matching, non_matching
class tasksCallTransformer(cst.CSTTransformer):
    """libcst transformer that rewrites flattened Cloud Tasks client calls
    into the request-object form, e.g. ``client.get_queue(name)`` becomes
    ``client.get_queue(request={'name': name})``, preserving the control
    parameters (retry/timeout/metadata) as keyword arguments."""

    # Keyword parameters that are call controls, not request fields.
    CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
    # Method name -> ordered tuple of its request-field parameter names.
    METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
        'create_queue': ('parent', 'queue', ),
        'create_task': ('parent', 'task', 'response_view', ),
        'delete_queue': ('name', ),
        'delete_task': ('name', ),
        'get_iam_policy': ('resource', 'options', ),
        'get_queue': ('name', 'read_mask', ),
        'get_task': ('name', 'response_view', ),
        'list_queues': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ),
        'list_tasks': ('parent', 'response_view', 'page_size', 'page_token', ),
        'pause_queue': ('name', ),
        'purge_queue': ('name', ),
        'resume_queue': ('name', ),
        'run_task': ('name', 'response_view', ),
        'set_iam_policy': ('resource', 'policy', ),
        'test_iam_permissions': ('resource', 'permissions', ),
        'update_queue': ('queue', 'update_mask', ),
    }

    def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
        """Rewrite one call site, or return it unchanged when it is not a
        recognizable API method call."""
        try:
            key = original.func.attr.value
            kword_params = self.METHOD_TO_PARAMS[key]
        except (AttributeError, KeyError):
            # Either not a method from the API or too convoluted to be sure.
            return updated

        # If the existing code is valid, keyword args come after positional args.
        # Therefore, all positional args must map to the first parameters.
        args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
        if any(k.keyword.value == "request" for k in kwargs):
            # We've already fixed this file, don't fix it again.
            return updated

        # Separate the call-control keywords from the request-field keywords.
        kwargs, ctrl_kwargs = partition(
            lambda a: not a.keyword.value in self.CTRL_PARAMS,
            kwargs
        )

        # Positional args beyond the request fields must be positional
        # control params; re-emit them as keyword arguments.
        args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
        ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
                           for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))

        # Pack all request fields into a single dict literal keyword arg.
        request_arg = cst.Arg(
            value=cst.Dict([
                cst.DictElement(
                    cst.SimpleString("'{}'".format(name)),
                    cst.Element(value=arg.value)
                )
                # Note: the args + kwargs looks silly, but keep in mind that
                # the control parameters had to be stripped out, and that
                # those could have been passed positionally or by keyword.
                for name, arg in zip(kword_params, args + kwargs)]),
            keyword=cst.Name("request")
        )

        return updated.with_changes(
            args=[request_arg] + ctrl_kwargs
        )
def fix_files(
    in_dir: pathlib.Path,
    out_dir: pathlib.Path,
    *,
    transformer=tasksCallTransformer(),
):
    """Duplicate the input dir to the output dir, fixing file method calls.

    Only ``.py`` files are processed; each is parsed with libcst, run
    through *transformer*, and written to the mirrored path under
    *out_dir*. Note the transformer default is a shared module-level
    instance (harmless here because the transformer is stateless).

    Preconditions:
    * in_dir is a real directory
    * out_dir is a real, empty directory
    """
    # Lazily walk the tree for Python sources.
    pyfile_gen = (
        pathlib.Path(os.path.join(root, f))
        for root, _, files in os.walk(in_dir)
        for f in files if os.path.splitext(f)[1] == ".py"
    )
    for fpath in pyfile_gen:
        with open(fpath, 'r') as f:
            src = f.read()

        # Parse the code and insert method call fixes.
        tree = cst.parse_module(src)
        updated = tree.visit(transformer)

        # Create the path and directory structure for the new file.
        updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
        updated_path.parent.mkdir(parents=True, exist_ok=True)

        # Generate the updated source file at the corresponding path.
        with open(updated_path, 'w') as f:
            f.write(updated.code)
if __name__ == '__main__':
    # Command-line driver: validate the two directory arguments, then fix
    # every .py file from input_dir into output_dir.
    parser = argparse.ArgumentParser(
        description="""Fix up source that uses the tasks client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
    parser.add_argument(
        '-d',
        '--input-directory',
        required=True,
        dest='input_dir',
        help='the input directory to walk for python files to fix up',
    )
    parser.add_argument(
        '-o',
        '--output-directory',
        required=True,
        dest='output_dir',
        help='the directory to output files fixed via un-flattening',
    )
    args = parser.parse_args()
    input_dir = pathlib.Path(args.input_dir)
    output_dir = pathlib.Path(args.output_dir)
    # Refuse to run unless input exists and output is an empty directory
    # (fix_files preconditions).
    if not input_dir.is_dir():
        print(
            f"input directory '{input_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)
    if not output_dir.is_dir():
        print(
            f"output directory '{output_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)
    if os.listdir(output_dir):
        print(
            f"output directory '{output_dir}' is not empty",
            file=sys.stderr,
        )
        sys.exit(-1)
    fix_files(input_dir, output_dir)
| apache-2.0 |
WebSpider/headphones | lib/bs4/builder/_htmlparser.py | 71 | 9102 | """Use the HTMLParser library to parse HTML files that aren't too bad."""
__all__ = [
'HTMLParserTreeBuilder',
]
from HTMLParser import HTMLParser
try:
from HTMLParser import HTMLParseError
except ImportError, e:
# HTMLParseError is removed in Python 3.5. Since it can never be
# thrown in 3.5, we can just define our own class as a placeholder.
class HTMLParseError(Exception):
pass
import sys
import warnings
# Starting in Python 3.2, the HTMLParser constructor takes a 'strict'
# argument, which we'd like to set to False. Unfortunately,
# http://bugs.python.org/issue13273 makes strict=True a better bet
# before Python 3.2.3.
#
# At the end of this file, we monkeypatch HTMLParser so that
# strict=True works well on Python 3.2.2.
major, minor, release = sys.version_info[:3]
CONSTRUCTOR_TAKES_STRICT = major == 3 and minor == 2 and release >= 3
CONSTRUCTOR_STRICT_IS_DEPRECATED = major == 3 and minor == 3
CONSTRUCTOR_TAKES_CONVERT_CHARREFS = major == 3 and minor >= 4
from bs4.element import (
CData,
Comment,
Declaration,
Doctype,
ProcessingInstruction,
)
from bs4.dammit import EntitySubstitution, UnicodeDammit
from bs4.builder import (
HTML,
HTMLTreeBuilder,
STRICT,
)
HTMLPARSER = 'html.parser'
class BeautifulSoupHTMLParser(HTMLParser):
    """HTMLParser subclass forwarding parse events to a BeautifulSoup object.

    The tree builder assigns ``self.soup`` before feeding markup; every
    handler below translates an html.parser event into the corresponding
    soup.handle_* / endData() call.  (Python 2 code: uses unichr/u-literals.)
    """

    def handle_starttag(self, name, attrs):
        # XXX namespace
        attr_dict = {}
        for key, value in attrs:
            # Change None attribute values to the empty string
            # for consistency with the other tree builders.
            if value is None:
                value = ''
            attr_dict[key] = value
        # NOTE(review): this assignment looks unused — confirm before removing.
        attrvalue = '""'
        self.soup.handle_starttag(name, None, None, attr_dict)

    def handle_endtag(self, name):
        self.soup.handle_endtag(name)

    def handle_data(self, data):
        self.soup.handle_data(data)

    def handle_charref(self, name):
        # XXX workaround for a bug in HTMLParser. Remove this once
        # it's fixed in all supported versions.
        # http://bugs.python.org/issue13633
        if name.startswith('x'):
            real_name = int(name.lstrip('x'), 16)
        elif name.startswith('X'):
            real_name = int(name.lstrip('X'), 16)
        else:
            real_name = int(name)

        try:
            data = unichr(real_name)
        except (ValueError, OverflowError), e:
            # Out-of-range code point: substitute U+FFFD.
            data = u"\N{REPLACEMENT CHARACTER}"

        self.handle_data(data)

    def handle_entityref(self, name):
        character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name)
        if character is not None:
            data = character
        else:
            # Unknown entity: pass it through literally.
            data = "&%s;" % name
        self.handle_data(data)

    def handle_comment(self, data):
        self.soup.endData()
        self.soup.handle_data(data)
        self.soup.endData(Comment)

    def handle_decl(self, data):
        self.soup.endData()
        # Strip the DOCTYPE keyword; keep only the declaration payload.
        if data.startswith("DOCTYPE "):
            data = data[len("DOCTYPE "):]
        elif data == 'DOCTYPE':
            # i.e. "<!DOCTYPE>"
            data = ''
        self.soup.handle_data(data)
        self.soup.endData(Doctype)

    def unknown_decl(self, data):
        if data.upper().startswith('CDATA['):
            cls = CData
            data = data[len('CDATA['):]
        else:
            cls = Declaration
        self.soup.endData()
        self.soup.handle_data(data)
        self.soup.endData(cls)

    def handle_pi(self, data):
        self.soup.endData()
        self.soup.handle_data(data)
        self.soup.endData(ProcessingInstruction)
class HTMLParserTreeBuilder(HTMLTreeBuilder):
    """Tree builder backed by the stdlib html.parser module."""

    is_xml = False
    picklable = True
    NAME = HTMLPARSER
    features = [NAME, HTML, STRICT]

    def __init__(self, *args, **kwargs):
        # Only pass strict/convert_charrefs where the running Python's
        # HTMLParser constructor accepts them (see version flags above).
        if CONSTRUCTOR_TAKES_STRICT and not CONSTRUCTOR_STRICT_IS_DEPRECATED:
            kwargs['strict'] = False
        if CONSTRUCTOR_TAKES_CONVERT_CHARREFS:
            kwargs['convert_charrefs'] = False
        # Stored so feed() can build a fresh parser per document.
        self.parser_args = (args, kwargs)

    def prepare_markup(self, markup, user_specified_encoding=None,
                       document_declared_encoding=None, exclude_encodings=None):
        """
        :return: A 4-tuple (markup, original encoding, encoding
        declared within markup, whether any characters had to be
        replaced with REPLACEMENT CHARACTER).
        """
        # Already-decoded text needs no detection pass.
        if isinstance(markup, unicode):
            yield (markup, None, None, False)
            return

        try_encodings = [user_specified_encoding, document_declared_encoding]
        dammit = UnicodeDammit(markup, try_encodings, is_html=True,
                               exclude_encodings=exclude_encodings)
        yield (dammit.markup, dammit.original_encoding,
               dammit.declared_html_encoding,
               dammit.contains_replacement_characters)

    def feed(self, markup):
        args, kwargs = self.parser_args
        parser = BeautifulSoupHTMLParser(*args, **kwargs)
        parser.soup = self.soup
        try:
            parser.feed(markup)
        except HTMLParseError, e:
            # html.parser gave up: point the user at an external parser.
            warnings.warn(RuntimeWarning(
                "Python's built-in HTMLParser cannot parse the given document. This is not a bug in Beautiful Soup. The best solution is to install an external parser (lxml or html5lib), and use Beautiful Soup with that parser. See http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser for help."))
            raise e
# Patch 3.2 versions of HTMLParser earlier than 3.2.3 to use some
# 3.2.3 code. This ensures they don't treat markup like <p></p> as a
# string.
#
# XXX This code can be removed once most Python 3 users are on 3.2.3.
# Backport block: on Python 3.2.0-3.2.2 (where strict=False is unavailable),
# graft the 3.2.3 tolerant attribute parsing onto HTMLParser so markup like
# <p></p> is parsed as tags rather than treated as a string.
if major == 3 and minor == 2 and not CONSTRUCTOR_TAKES_STRICT:
    import re
    # Tolerant attribute regex from Python 3.2.3.
    attrfind_tolerant = re.compile(
        r'\s*((?<=[\'"\s])[^\s/>][^\s/=>]*)(\s*=+\s*'
        r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?')
    HTMLParserTreeBuilder.attrfind_tolerant = attrfind_tolerant

    locatestarttagend = re.compile(r"""
        <[a-zA-Z][-.a-zA-Z0-9:_]*  # tag name
        (?:\s+                     # whitespace before attribute name
        (?:[a-zA-Z_][-.:a-zA-Z0-9_]*  # attribute name
        (?:\s*=\s*                 # value indicator
        (?:'[^']*'                 # LITA-enclosed value
        |\"[^\"]*\"                # LIT-enclosed value
        |[^'\">\s]+                # bare value
        )
        )?
        )
        )*
        \s*                        # trailing whitespace
    """, re.VERBOSE)
    BeautifulSoupHTMLParser.locatestarttagend = locatestarttagend

    from html.parser import tagfind, attrfind

    def parse_starttag(self, i):
        # 3.2.3 version of HTMLParser.parse_starttag, choosing the tolerant
        # attribute regex when self.strict is false.
        self.__starttag_text = None
        endpos = self.check_for_whole_start_tag(i)
        if endpos < 0:
            return endpos
        rawdata = self.rawdata
        self.__starttag_text = rawdata[i:endpos]

        # Now parse the data between i+1 and j into a tag and attrs
        attrs = []
        match = tagfind.match(rawdata, i+1)
        assert match, 'unexpected call to parse_starttag()'
        k = match.end()
        self.lasttag = tag = rawdata[i+1:k].lower()
        while k < endpos:
            if self.strict:
                m = attrfind.match(rawdata, k)
            else:
                m = attrfind_tolerant.match(rawdata, k)
            if not m:
                break
            attrname, rest, attrvalue = m.group(1, 2, 3)
            if not rest:
                attrvalue = None
            elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
                 attrvalue[:1] == '"' == attrvalue[-1:]:
                attrvalue = attrvalue[1:-1]
            if attrvalue:
                attrvalue = self.unescape(attrvalue)
            attrs.append((attrname.lower(), attrvalue))
            k = m.end()

        end = rawdata[k:endpos].strip()
        if end not in (">", "/>"):
            # Malformed start tag: report position, emit raw text instead.
            lineno, offset = self.getpos()
            if "\n" in self.__starttag_text:
                lineno = lineno + self.__starttag_text.count("\n")
                offset = len(self.__starttag_text) \
                         - self.__starttag_text.rfind("\n")
            else:
                offset = offset + len(self.__starttag_text)
            if self.strict:
                self.error("junk characters in start tag: %r"
                           % (rawdata[k:endpos][:20],))
            self.handle_data(rawdata[i:endpos])
            return endpos
        if end.endswith('/>'):
            # XHTML-style empty tag: <span attr="value" />
            self.handle_startendtag(tag, attrs)
        else:
            self.handle_starttag(tag, attrs)
            if tag in self.CDATA_CONTENT_ELEMENTS:
                self.set_cdata_mode(tag)
        return endpos

    def set_cdata_mode(self, elem):
        # Inside <script>/<style>: only the matching close tag is interesting.
        self.cdata_elem = elem.lower()
        self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)

    BeautifulSoupHTMLParser.parse_starttag = parse_starttag
    BeautifulSoupHTMLParser.set_cdata_mode = set_cdata_mode

    # After patching, strict=True behaves acceptably on this interpreter.
    CONSTRUCTOR_TAKES_STRICT = True
| gpl-3.0 |
heracek/django-nonrel | tests/regressiontests/i18n/commands/extraction.py | 1 | 6442 | import os
import re
import shutil
from django.test import TestCase
from django.core import management
LOCALE='de'
class ExtractorTests(TestCase):
    """Base class for makemessages extraction tests: chdir/cleanup plumbing
    plus msgid assertion helpers over raw .po file contents."""

    # Catalog path that makemessages writes for LOCALE.
    PO_FILE='locale/%s/LC_MESSAGES/django.po' % LOCALE

    def setUp(self):
        # Remember the caller's cwd; tests chdir into the test directory.
        self._cwd = os.getcwd()
        self.test_dir = os.path.abspath(os.path.dirname(__file__))

    def _rmrf(self, dname):
        # Safety net: refuse to delete anything outside the test directory.
        if os.path.commonprefix([self.test_dir, os.path.abspath(dname)]) != self.test_dir:
            return
        shutil.rmtree(dname)

    def tearDown(self):
        # Remove the locale tree generated by the test, then restore cwd.
        os.chdir(self.test_dir)
        try:
            self._rmrf('locale/%s' % LOCALE)
        except OSError:
            pass
        os.chdir(self._cwd)

    def assertMsgId(self, msgid, s, use_quotes=True):
        # Assert `msgid` appears as a msgid entry in the .po contents `s`.
        if use_quotes:
            msgid = '"%s"' % msgid
        return self.assert_(re.search('^msgid %s' % msgid, s, re.MULTILINE))

    def assertNotMsgId(self, msgid, s, use_quotes=True):
        # Inverse of assertMsgId.
        if use_quotes:
            msgid = '"%s"' % msgid
        return self.assert_(not re.search('^msgid %s' % msgid, s, re.MULTILINE))
class BasicExtractorTests(ExtractorTests):
    """Core makemessages behavior: translator comments, templatization of
    literal %-signs, and error reporting for bad templates."""

    def test_comments_extractor(self):
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=LOCALE, verbosity=0)
        self.assert_(os.path.exists(self.PO_FILE))
        po_contents = open(self.PO_FILE, 'r').read()
        # Only comments marked "Translators:" are extracted.
        self.assert_('#. Translators: This comment should be extracted' in po_contents)
        self.assert_('This comment should not be extracted' not in po_contents)
        # Comments in templates
        self.assert_('#. Translators: Django template comment for translators' in po_contents)
        self.assert_('#. Translators: Django comment block for translators' in po_contents)

    def test_templatize(self):
        # Literal % in templates must be escaped as %% in msgids.
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=LOCALE, verbosity=0)
        self.assert_(os.path.exists(self.PO_FILE))
        po_contents = open(self.PO_FILE, 'r').read()
        self.assertMsgId('I think that 100%% is more that 50%% of anything.', po_contents)
        self.assertMsgId('I think that 100%% is more that 50%% of %\(obj\)s.', po_contents)

    def test_extraction_error(self):
        # A blocktrans containing another block tag must raise SyntaxError
        # with a message pointing at the offending file and line.
        os.chdir(self.test_dir)
        shutil.copyfile('./templates/template_with_error.txt', './templates/template_with_error.html')
        self.assertRaises(SyntaxError, management.call_command, 'makemessages', locale=LOCALE, verbosity=0)
        try: # TODO: Simplify this try/try block when we drop support for Python 2.4
            try:
                management.call_command('makemessages', locale=LOCALE, verbosity=0)
            except SyntaxError, e:
                self.assertEqual(str(e), 'Translation blocks must not include other block tags: blocktrans (file templates/template_with_error.html, line 3)')
        finally:
            os.remove('./templates/template_with_error.html')
            os.remove('./templates/template_with_error.html.py') # Waiting for #8536 to be fixed
class JavascriptExtractorTests(ExtractorTests):
    """Extraction from JavaScript sources into the djangojs domain."""

    # The djangojs domain writes its own catalog file.
    PO_FILE='locale/%s/LC_MESSAGES/djangojs.po' % LOCALE

    def test_javascript_literals(self):
        os.chdir(self.test_dir)
        management.call_command('makemessages', domain='djangojs', locale=LOCALE, verbosity=0)
        self.assert_(os.path.exists(self.PO_FILE))
        po_contents = open(self.PO_FILE, 'r').read()
        self.assertMsgId('This literal should be included.', po_contents)
        self.assertMsgId('This one as well.', po_contents)
class IgnoredExtractorTests(ExtractorTests):
    """The --ignore patterns exclude matching files from extraction."""

    def test_ignore_option(self):
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=LOCALE, verbosity=0, ignore_patterns=['ignore_dir/*'])
        self.assert_(os.path.exists(self.PO_FILE))
        po_contents = open(self.PO_FILE, 'r').read()
        self.assertMsgId('This literal should be included.', po_contents)
        # Strings under ignore_dir/ must not be extracted.
        self.assertNotMsgId('This should be ignored.', po_contents)
class SymlinkExtractorTests(ExtractorTests):
    """With --symlinks, makemessages follows symlinked template dirs and
    records the symlinked path in the catalog."""

    def setUp(self):
        self._cwd = os.getcwd()
        self.test_dir = os.path.abspath(os.path.dirname(__file__))
        # Symlink created (and removed) by the test itself.
        self.symlinked_dir = os.path.join(self.test_dir, 'templates_symlinked')

    def tearDown(self):
        super(SymlinkExtractorTests, self).tearDown()
        os.chdir(self.test_dir)
        try:
            os.remove(self.symlinked_dir)
        except OSError:
            pass
        os.chdir(self._cwd)

    def test_symlink(self):
        # Skipped implicitly on platforms without os.symlink (e.g. Windows
        # on this Python version).
        if hasattr(os, 'symlink'):
            if os.path.exists(self.symlinked_dir):
                self.assert_(os.path.islink(self.symlinked_dir))
            else:
                os.symlink(os.path.join(self.test_dir, 'templates'), self.symlinked_dir)
            os.chdir(self.test_dir)
            management.call_command('makemessages', locale=LOCALE, verbosity=0, symlinks=True)
            self.assert_(os.path.exists(self.PO_FILE))
            po_contents = open(self.PO_FILE, 'r').read()
            self.assertMsgId('This literal should be included.', po_contents)
            # The occurrence comment must reference the symlinked location.
            self.assert_('templates_symlinked/test.html' in po_contents)
class CopyPluralFormsExtractorTests(ExtractorTests):
    """The Plural-Forms header is copied from Django's own catalog for the
    target locale instead of the gettext placeholder."""

    def test_copy_plural_forms(self):
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=LOCALE, verbosity=0)
        self.assert_(os.path.exists(self.PO_FILE))
        po_contents = open(self.PO_FILE, 'r').read()
        self.assert_('Plural-Forms: nplurals=2; plural=(n != 1)' in po_contents)
class NoWrapExtractorTests(ExtractorTests):
    """The --no-wrap option controls msgid line wrapping in the catalog."""

    def test_no_wrap_enabled(self):
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=LOCALE, verbosity=0, no_wrap=True)
        self.assert_(os.path.exists(self.PO_FILE))
        po_contents = open(self.PO_FILE, 'r').read()
        # One long single-line msgid.
        self.assertMsgId('This literal should also be included wrapped or not wrapped depending on the use of the --no-wrap option.', po_contents)

    def test_no_wrap_disabled(self):
        os.chdir(self.test_dir)
        management.call_command('makemessages', locale=LOCALE, verbosity=0, no_wrap=False)
        self.assert_(os.path.exists(self.PO_FILE))
        po_contents = open(self.PO_FILE, 'r').read()
        # Default gettext wrapping: msgid split across continuation lines.
        self.assertMsgId('""\n"This literal should also be included wrapped or not wrapped depending on the "\n"use of the --no-wrap option."', po_contents, use_quotes=False)
arnavd96/Cinemiezer | myvenv/lib/python3.4/site-packages/pip/cmdoptions.py | 361 | 9507 | """
shared options and groups
The principle here is to define options once, but *not* instantiate them globally.
One reason being that options with action='append' can carry state between parses.
pip parse's general options twice internally, and shouldn't pass on state.
To be consistent, all options will follow this design.
"""
import copy
from optparse import OptionGroup, SUPPRESS_HELP, Option
from pip.locations import build_prefix, default_log_file
def make_option_group(group, parser):
    """Build and return an optparse OptionGroup from a group description.

    group -- dict with a 'name' key (group title) and an 'options' key
             (sequence of OptionMaker-like objects exposing make())
    parser -- the optparse parser the group belongs to
    """
    og = OptionGroup(parser, group['name'])
    for maker in group['options']:
        og.add_option(maker.make())
    return og
class OptionMaker(object):
    """Deferred optparse Option factory.

    Stores the constructor args/kwargs and builds a *fresh* Option from
    deep copies on every make() call, so options with mutable state
    (e.g. action='append') never leak state between parser instantiations.
    """

    def __init__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs

    def make(self):
        """Return a new Option built from copies of the stored arguments."""
        return Option(*copy.deepcopy(self.args), **copy.deepcopy(self.kwargs))
###########
# options #
###########
help_ = OptionMaker(
'-h', '--help',
dest='help',
action='help',
help='Show help.')
require_virtualenv = OptionMaker(
# Run only if inside a virtualenv, bail if not.
'--require-virtualenv', '--require-venv',
dest='require_venv',
action='store_true',
default=False,
help=SUPPRESS_HELP)
verbose = OptionMaker(
'-v', '--verbose',
dest='verbose',
action='count',
default=0,
help='Give more output. Option is additive, and can be used up to 3 times.')
version = OptionMaker(
'-V', '--version',
dest='version',
action='store_true',
help='Show version and exit.')
quiet = OptionMaker(
'-q', '--quiet',
dest='quiet',
action='count',
default=0,
help='Give less output.')
log = OptionMaker(
'--log',
dest='log',
metavar='path',
help='Path to a verbose appending log. This log is inactive by default.')
log_explicit_levels = OptionMaker(
# Writes the log levels explicitely to the log'
'--log-explicit-levels',
dest='log_explicit_levels',
action='store_true',
default=False,
help=SUPPRESS_HELP)
log_file = OptionMaker(
# The default log file
'--log-file', '--local-log',
dest='log_file',
metavar='path',
default=default_log_file,
help='Path to a verbose non-appending log, that only logs failures. This log is active by default at %default.')
no_input = OptionMaker(
# Don't ask for input
'--no-input',
dest='no_input',
action='store_true',
default=False,
help=SUPPRESS_HELP)
proxy = OptionMaker(
'--proxy',
dest='proxy',
type='str',
default='',
help="Specify a proxy in the form [user:passwd@]proxy.server:port.")
timeout = OptionMaker(
'--timeout', '--default-timeout',
metavar='sec',
dest='timeout',
type='float',
default=15,
help='Set the socket timeout (default %default seconds).')
default_vcs = OptionMaker(
# The default version control system for editables, e.g. 'svn'
'--default-vcs',
dest='default_vcs',
type='str',
default='',
help=SUPPRESS_HELP)
skip_requirements_regex = OptionMaker(
# A regex to be used to skip requirements
'--skip-requirements-regex',
dest='skip_requirements_regex',
type='str',
default='',
help=SUPPRESS_HELP)
exists_action = OptionMaker(
# Option when path already exist
'--exists-action',
dest='exists_action',
type='choice',
choices=['s', 'i', 'w', 'b'],
default=[],
action='append',
metavar='action',
help="Default action when a path already exists: "
"(s)witch, (i)gnore, (w)ipe, (b)ackup.")
cert = OptionMaker(
'--cert',
dest='cert',
type='str',
default='',
metavar='path',
help = "Path to alternate CA bundle.")
index_url = OptionMaker(
'-i', '--index-url', '--pypi-url',
dest='index_url',
metavar='URL',
default='https://pypi.python.org/simple/',
help='Base URL of Python Package Index (default %default).')
extra_index_url = OptionMaker(
'--extra-index-url',
dest='extra_index_urls',
metavar='URL',
action='append',
default=[],
help='Extra URLs of package indexes to use in addition to --index-url.')
no_index = OptionMaker(
'--no-index',
dest='no_index',
action='store_true',
default=False,
help='Ignore package index (only looking at --find-links URLs instead).')
find_links = OptionMaker(
'-f', '--find-links',
dest='find_links',
action='append',
default=[],
metavar='url',
help="If a url or path to an html file, then parse for links to archives. If a local path or file:// url that's a directory, then look for archives in the directory listing.")
# TODO: Remove after 1.6
use_mirrors = OptionMaker(
'-M', '--use-mirrors',
dest='use_mirrors',
action='store_true',
default=False,
help=SUPPRESS_HELP)
# TODO: Remove after 1.6
mirrors = OptionMaker(
'--mirrors',
dest='mirrors',
metavar='URL',
action='append',
default=[],
help=SUPPRESS_HELP)
allow_external = OptionMaker(
"--allow-external",
dest="allow_external",
action="append",
default=[],
metavar="PACKAGE",
help="Allow the installation of externally hosted files",
)
allow_all_external = OptionMaker(
"--allow-all-external",
dest="allow_all_external",
action="store_true",
default=False,
help="Allow the installation of all externally hosted files",
)
# Remove after 1.7
no_allow_external = OptionMaker(
"--no-allow-external",
dest="allow_all_external",
action="store_false",
default=False,
help=SUPPRESS_HELP,
)
# Remove --allow-insecure after 1.7
allow_unsafe = OptionMaker(
"--allow-unverified", "--allow-insecure",
dest="allow_unverified",
action="append",
default=[],
metavar="PACKAGE",
help="Allow the installation of insecure and unverifiable files",
)
# Remove after 1.7
no_allow_unsafe = OptionMaker(
"--no-allow-insecure",
dest="allow_all_insecure",
action="store_false",
default=False,
help=SUPPRESS_HELP
)
# Remove after 1.5
process_dependency_links = OptionMaker(
"--process-dependency-links",
dest="process_dependency_links",
action="store_true",
default=False,
help="Enable the processing of dependency links.",
)
requirements = OptionMaker(
'-r', '--requirement',
dest='requirements',
action='append',
default=[],
metavar='file',
help='Install from the given requirements file. '
'This option can be used multiple times.')
use_wheel = OptionMaker(
'--use-wheel',
dest='use_wheel',
action='store_true',
help=SUPPRESS_HELP,
)
no_use_wheel = OptionMaker(
'--no-use-wheel',
dest='use_wheel',
action='store_false',
default=True,
help=('Do not Find and prefer wheel archives when searching indexes and '
'find-links locations.'),
)
download_cache = OptionMaker(
'--download-cache',
dest='download_cache',
metavar='dir',
default=None,
help='Cache downloaded packages in <dir>.')
no_deps = OptionMaker(
'--no-deps', '--no-dependencies',
dest='ignore_dependencies',
action='store_true',
default=False,
help="Don't install package dependencies.")
build_dir = OptionMaker(
'-b', '--build', '--build-dir', '--build-directory',
dest='build_dir',
metavar='dir',
default=build_prefix,
help='Directory to unpack packages into and build in. '
'The default in a virtualenv is "<venv path>/build". '
'The default for global installs is "<OS temp dir>/pip_build_<username>".')
install_options = OptionMaker(
'--install-option',
dest='install_options',
action='append',
metavar='options',
help="Extra arguments to be supplied to the setup.py install "
"command (use like --install-option=\"--install-scripts=/usr/local/bin\"). "
"Use multiple --install-option options to pass multiple options to setup.py install. "
"If you are using an option with a directory path, be sure to use absolute path.")
global_options = OptionMaker(
'--global-option',
dest='global_options',
action='append',
metavar='options',
help="Extra global options to be supplied to the setup.py "
"call before the install command.")
no_clean = OptionMaker(
'--no-clean',
action='store_true',
default=False,
help="Don't clean up build directories.")
##########
# groups #
##########
general_group = {
'name': 'General Options',
'options': [
help_,
require_virtualenv,
verbose,
version,
quiet,
log_file,
log,
log_explicit_levels,
no_input,
proxy,
timeout,
default_vcs,
skip_requirements_regex,
exists_action,
cert,
]
}
index_group = {
'name': 'Package Index Options',
'options': [
index_url,
extra_index_url,
no_index,
find_links,
use_mirrors,
mirrors,
allow_external,
allow_all_external,
no_allow_external,
allow_unsafe,
no_allow_unsafe,
process_dependency_links,
]
}
| mit |
40223113/0623-w17 | static/Brython3.1.1-20150328-091302/Lib/_abcoll.py | 688 | 5155 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) for collections, according to PEP 3119.
DON'T USE THIS MODULE DIRECTLY! The classes here should be imported
via collections; they are defined here only to alleviate certain
bootstrapping issues. Unit tests are in test_collections.
"""
from abc import ABCMeta, abstractmethod
import sys
__all__ = ["Hashable", "Iterable", "Iterator",
"Sized", "Container", "Callable",
"Set", "MutableSet",
"Mapping", "MutableMapping",
"MappingView", "KeysView", "ItemsView", "ValuesView",
"Sequence", "MutableSequence",
"ByteString",
]
"""
### collection related types which are not exposed through builtin ###
## iterators ##
#fixme brython
#bytes_iterator = type(iter(b''))
bytes_iterator = type(iter(''))
#fixme brython
#bytearray_iterator = type(iter(bytearray()))
#callable_iterator = ???
dict_keyiterator = type(iter({}.keys()))
dict_valueiterator = type(iter({}.values()))
dict_itemiterator = type(iter({}.items()))
list_iterator = type(iter([]))
list_reverseiterator = type(iter(reversed([])))
range_iterator = type(iter(range(0)))
set_iterator = type(iter(set()))
str_iterator = type(iter(""))
tuple_iterator = type(iter(()))
zip_iterator = type(iter(zip()))
## views ##
dict_keys = type({}.keys())
dict_values = type({}.values())
dict_items = type({}.items())
## misc ##
dict_proxy = type(type.__dict__)
"""
def abstractmethod(self):
    # Brython shim: deliberately shadows abc.abstractmethod (imported above)
    # with an identity decorator — this minimal port has no ABCMeta
    # enforcement, so "abstract" methods are just ordinary methods with
    # default bodies.
    return self
### ONE-TRICK PONIES ###
#class Iterable(metaclass=ABCMeta):
class Iterable:
    """Stand-in for collections.abc.Iterable (no real ABCMeta here).

    Any class whose MRO defines __iter__ is reported as a virtual
    subclass via __subclasshook__.
    """

    @abstractmethod
    def __iter__(self):
        # Empty generator: the bare return before the (unreachable) yield
        # keeps this a generator function that yields nothing.
        return
        yield

    @classmethod
    def __subclasshook__(cls, C):
        if cls is not Iterable:
            return NotImplemented
        for base in C.__mro__:
            if "__iter__" in base.__dict__:
                return True
        return NotImplemented
#class Sized(metaclass=ABCMeta):
class Sized:
    """Stand-in for collections.abc.Sized (no real ABCMeta here).

    Any class whose MRO defines __len__ is reported as a virtual
    subclass via __subclasshook__.
    """

    @abstractmethod
    def __len__(self):
        return 0

    @classmethod
    def __subclasshook__(cls, C):
        if cls is not Sized:
            return NotImplemented
        for base in C.__mro__:
            if "__len__" in base.__dict__:
                return True
        return NotImplemented
#class Container(metaclass=ABCMeta):
class Container:
    """Stand-in for collections.abc.Container (no real ABCMeta here).

    Any class whose MRO defines __contains__ is reported as a virtual
    subclass via __subclasshook__.
    """

    @abstractmethod
    def __contains__(self, x):
        return False

    @classmethod
    def __subclasshook__(cls, C):
        if cls is not Container:
            return NotImplemented
        for base in C.__mro__:
            if "__contains__" in base.__dict__:
                return True
        return NotImplemented
### MAPPINGS ###
class Mapping(Sized, Iterable, Container):
    """Read-only mapping ABC: concrete subclasses supply __getitem__
    (plus __len__/__iter__ from the bases) and inherit the rest."""

    @abstractmethod
    def __getitem__(self, key):
        raise KeyError

    def get(self, key, default=None):
        # EAFP: single lookup, fall back to `default` on a missing key.
        try:
            return self[key]
        except KeyError:
            return default

    def __contains__(self, key):
        # Membership defined in terms of __getitem__.
        try:
            self[key]
        except KeyError:
            return False
        else:
            return True

    def keys(self):
        return KeysView(self)

    def items(self):
        return ItemsView(self)

    def values(self):
        return ValuesView(self)

    def __eq__(self, other):
        # Mappings compare equal iff their (key, value) sets match.
        if not isinstance(other, Mapping):
            return NotImplemented
        return dict(self.items()) == dict(other.items())

    def __ne__(self, other):
        return not (self == other)
class MutableMapping(Mapping):
    """Mutable-mapping mix-in: concrete pop/popitem/clear/update/setdefault
    are derived from the abstract __getitem__/__setitem__/__delitem__."""

    @abstractmethod
    def __setitem__(self, key, value):
        raise KeyError

    @abstractmethod
    def __delitem__(self, key):
        raise KeyError

    # Private sentinel (name-mangled) so pop() can distinguish "no default
    # supplied" from an explicit default of None.
    __marker = object()

    def pop(self, key, default=__marker):
        try:
            value = self[key]
        except KeyError:
            if default is self.__marker:
                raise
            return default
        else:
            del self[key]
            return value

    def popitem(self):
        # Remove and return an arbitrary (key, value) pair; KeyError when empty.
        try:
            key = next(iter(self))
        except StopIteration:
            raise KeyError
        value = self[key]
        del self[key]
        return key, value

    def clear(self):
        # Repeatedly popitem() until the mapping reports itself empty.
        try:
            while True:
                self.popitem()
        except KeyError:
            pass

    def update(*args, **kwds):
        # self is extracted from *args so that keyword names such as 'self'
        # remain usable as mapping keys (mirrors the stdlib implementation).
        if len(args) > 2:
            raise TypeError("update() takes at most 2 positional "
                            "arguments ({} given)".format(len(args)))
        elif not args:
            raise TypeError("update() takes at least 1 argument (0 given)")
        self = args[0]
        other = args[1] if len(args) >= 2 else ()

        # Prefer Mapping iteration, then a .keys() protocol, then an
        # iterable of (key, value) pairs; keyword arguments win last.
        if isinstance(other, Mapping):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, "keys"):
            for key in other.keys():
                self[key] = other[key]
        else:
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value

    def setdefault(self, key, default=None):
        # Return self[key] if present, else insert and return *default*.
        try:
            return self[key]
        except KeyError:
            self[key] = default
            return default
#MutableMapping.register(dict)
| gpl-3.0 |
crewrktablets/rk29_kernel_2.6 | arch/ia64/scripts/unwcheck.py | 916 | 1718 | #!/usr/bin/env python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
if len(sys.argv) != 2:
print "Usage: %s FILE" % sys.argv[0]
sys.exit(2)
readelf = os.getenv("READELF", "readelf")
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
    # Verify that the summed unwind-region lengths equal the function's
    # slot count; on mismatch, count and report the error.
    # NOTE(review): Python 2 only (print statement). Relies on the globals
    # `start`/`end` set by the parsing loop below when `func` has no name.
    if slots != rlen_sum:
        global num_errors
        num_errors += 1
        if not func: func = "[%#x-%#x]" % (start, end)
        print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
    return
# Parse `readelf -u` output: each "<name>: [0xstart-0xend]" header flushes
# the previous function via check_func(), then accumulates rlen= lines.
# NOTE(review): Python 2 only (`long`, `0L`, print statements).
num_funcs = 0
num_errors = 0

func = False
slots = 0
rlen_sum = 0

for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
    m = start_pattern.match(line)
    if m:
        # New function header: check the previous one first.
        check_func(func, slots, rlen_sum)

        func = m.group(1)
        start = long(m.group(2), 16)
        end = long(m.group(3), 16)
        # ia64 bundles: 3 instruction slots per 16 bytes.
        slots = 3 * (end - start) / 16
        rlen_sum = 0L
        num_funcs += 1
    else:
        m = rlen_pattern.match(line)
        if m:
            rlen_sum += long(m.group(1))
# Flush the final function.
check_func(func, slots, rlen_sum)

if num_errors == 0:
    print "No errors detected in %u functions." % num_funcs
else:
    if num_errors > 1:
        err="errors"
    else:
        err="error"
    print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
    sys.exit(1)
Itxaka/st2 | st2api/st2api/controllers/v1/executionviews.py | 1 | 3355 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import chain
from pecan.rest import RestController
import six
from st2common import log as logging
from st2common.models.api.base import jsexpose
from st2common.persistence.execution import ActionExecution
LOG = logging.getLogger(__name__)
# List of supported filters and relation between filter name and execution property it represents.
# The same list is used both in ActionExecutionController to map filter names to properties and
# in FiltersController below to generate a list of unique values for each filter for UI so user
# could pick a filter from a drop down.
# If filter is unique for every execution or repeats very rarely (ex. execution id or parent
# reference) it should be also added to IGNORE_FILTERS to avoid bloating FiltersController
# response. Failure to do so will eventually result in Chrome hanging out while opening History
# tab of st2web.
SUPPORTED_FILTERS = {
'action': 'action.ref',
'status': 'status',
'liveaction': 'liveaction.id',
'parent': 'parent',
'rule': 'rule.name',
'runner': 'runner.name',
'timestamp': 'start_timestamp',
'trigger': 'trigger.name',
'trigger_type': 'trigger_type.name',
'trigger_instance': 'trigger_instance.id',
'user': 'liveaction.context.user'
}
# List of filters that are too broad to distinct by them and are very likely to represent 1 to 1
# relation between filter and particular history record.
IGNORE_FILTERS = ['parent', 'timestamp', 'liveaction', 'trigger_instance']
class FiltersController(RestController):
    @jsexpose()
    def get_all(self):
        """
        List all distinct filters.

        Handles requests:
            GET /executions/views/filters
        """
        filters = {}

        for name, field in six.iteritems(SUPPORTED_FILTERS):
            if name not in IGNORE_FILTERS:
                if isinstance(field, six.string_types):
                    # Simple case: a single field reference like '$action.ref'.
                    query = '$' + field
                else:
                    # Sequence of field parts: build a Mongo $concat of
                    # '$part' references joined by '.', dropping the
                    # trailing separator added by the interleave.
                    dot_notation = list(chain.from_iterable(
                        ('$' + item, '.') for item in field
                    ))
                    dot_notation.pop(-1)
                    query = {'$concat': dot_notation}

                # Group top-level executions (parent is None) by the field
                # value; the distinct group ids become the filter choices.
                aggregate = ActionExecution.aggregate([
                    {'$match': {'parent': None}},
                    {'$group': {'_id': query}}
                ])

                # Drop falsy ids (missing/None values).
                filters[name] = [res['_id'] for res in aggregate['result'] if res['_id']]

        return filters
class ExecutionViewsController(RestController):
    """Parent controller exposing the filters sub-resource
    (GET /executions/views/filters)."""
    filters = FiltersController()
| apache-2.0 |
Comunitea/alimentacion | stock_block_prodlots/wizard/wizard_block_prodlots.py | 5 | 4542 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2011 Pexego (<www.pexego.es>). All Rights Reserved
# $Omar Castiñeira Saavedra$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""wizard that add the functionally of blocked a production lot and prodlots affected"""
from osv import osv, fields
from tools.translate import _
class block_production_lot(osv.osv_memory):
    """Transient wizard that blocks a production lot and every lot
    related to it (upstream only, or upstream and downstream)."""

    _name = "block.production.lot"

    _columns = {
        'prodlot_id': fields.many2one('stock.production.lot', 'Production lot', required=True, readonly=True),
        'case_name': fields.char('Blockade reason', size=64, required=True),
        'case_description': fields.text('Description', required=True),
        'firmness_grade': fields.selection([('pessimistic', 'Pessimistic'), ('optimistic', 'Optimistic')], 'Firmness', required=True, help="Pessimistic block upstream and downstream, optimistic only upstream")
    }

    _defaults = {
        'firmness_grade': 'pessimistic'
    }

    def default_get(self, cr, uid, fields, context=None):
        """ To get default values for the object.
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param fields: List of fields for which we want default values
        @param context: A standard dictionary
        @return: A dictionary which of fields with values.
        """
        if context is None: context = {}
        # The lot being blocked is the record the wizard was opened from.
        prodlot_id = context and context.get('active_id', False) or False
        res = super(block_production_lot, self).default_get(cr, uid, fields, context=context)
        if 'prodlot_id' in fields:
            res.update({'prodlot_id': prodlot_id})
        return res

    def lock_production_lot(self, cr, uid, ids, context=None):
        """set in_alert a production lot and the affected lots"""
        if context is None: context = {}
        #gets the objects
        production_lot_obj = self.pool.get('stock.production.lot')
        obj = self.browse(cr, uid, ids)[0]
        #if the production lot already blocked, raises an exception
        # NOTE(review): both branches raise the same message; `in_alert`
        # probably deserves its own wording — confirm intended behavior.
        if obj.prodlot_id.blocked:
            raise osv.except_osv(_('Message !'), _('The production lot is blocked yet.'))
        elif obj.prodlot_id.in_alert:
            raise osv.except_osv(_('Message !'), _('The production lot is blocked yet.'))
        else:
            #gets all prodlots when this prodlot tooks part
            # optimistic => upstream only (per the field help text above).
            affected_prodlots = production_lot_obj.search_affected_prodlots(cr, uid, obj.prodlot_id.id, obj.firmness_grade == 'optimistic')
            affected_prodlots.append(obj.prodlot_id.id)
            # Create the blockade case linking all affected lots ((6,0,ids)
            # replaces the whole m2m set).
            blockade_id = self.pool.get('block.prodlot.cases').create(cr, uid, {
                'name': obj.case_name,
                'description': obj.case_description,
                'blocked_prodlots_ids': [(6, 0, affected_prodlots)],
                'parent_block_prodlot': obj.prodlot_id.id
            })
            # Write with empty vals — presumably to trigger recomputation of
            # function/store fields on the lots; TODO confirm.
            production_lot_obj.write(cr, uid, affected_prodlots, {})
            # NOTE(review): view_id is computed but never used below.
            view_id = self.pool.get('ir.ui.view').search(cr, uid, [('model', '=', 'block.prodlot.cases'), ('type', '=', 'tree')])[0]
            return {
                'name': _('Block Prodlot Case'),
                'view_type': 'form',
                'view_mode': 'tree,form',
                'res_model': 'block.prodlot.cases',
                'type': 'ir.actions.act_window',
                'domain': [('id', '=', blockade_id)],
                'nodestroy':True
            }
        # NOTE(review): unreachable — every branch above raises or returns.
        return {'type': 'ir.actions.act_window_close'}

block_production_lot()
| agpl-3.0 |
liavkoren/djangoDev | django/core/management/commands/shell.py | 67 | 4163 | from optparse import make_option
import os
from django.core.management.base import NoArgsCommand
class Command(NoArgsCommand):
    """Runs an interactive Python shell, preferring IPython or bpython
    when installed, with fallbacks down to the plain `code` module."""

    # Shell interfaces tried in order when no --interface is given.
    shells = ['ipython', 'bpython']

    option_list = NoArgsCommand.option_list + (
        make_option('--plain', action='store_true', dest='plain',
            help='Tells Django to use plain Python, not IPython or bpython.'),
        make_option('--no-startup', action='store_true', dest='no_startup',
            help='When using plain Python, ignore the PYTHONSTARTUP environment variable and ~/.pythonrc.py script.'),
        make_option('-i', '--interface', action='store', type='choice', choices=shells,
                    dest='interface',
            help='Specify an interactive interpreter interface. Available options: "ipython" and "bpython"'),

    )
    help = "Runs a Python interactive interpreter. Tries to use IPython or bpython, if one of them is available."
    requires_system_checks = False

    def _ipython_pre_011(self):
        """Start IPython pre-0.11"""
        from IPython.Shell import IPShell
        shell = IPShell(argv=[])
        shell.mainloop()

    def _ipython_pre_100(self):
        """Start IPython pre-1.0.0"""
        from IPython.frontend.terminal.ipapp import TerminalIPythonApp
        app = TerminalIPythonApp.instance()
        app.initialize(argv=[])
        app.start()

    def _ipython(self):
        """Start IPython >= 1.0"""
        from IPython import start_ipython
        start_ipython(argv=[])

    def ipython(self):
        """Start any version of IPython"""
        # Try newest entry point first; ImportError means "wrong version",
        # so fall through to the next known entry point.
        for ip in (self._ipython, self._ipython_pre_100, self._ipython_pre_011):
            try:
                ip()
            except ImportError:
                pass
            else:
                return
        # no IPython, raise ImportError
        raise ImportError("No IPython")

    def bpython(self):
        """Start a bpython shell (raises ImportError when not installed)."""
        import bpython
        bpython.embed()

    def run_shell(self, shell=None):
        # Try the requested interface, or each known one in order; an
        # ImportError from a launcher means that shell is unavailable.
        available_shells = [shell] if shell else self.shells

        for shell in available_shells:
            try:
                return getattr(self, shell)()
            except ImportError:
                pass
        raise ImportError

    def handle_noargs(self, **options):
        """Entry point: launch a fancy shell, falling back to code.interact."""
        use_plain = options.get('plain', False)
        no_startup = options.get('no_startup', False)
        interface = options.get('interface', None)

        try:
            if use_plain:
                # Don't bother loading IPython, because the user wants plain Python.
                raise ImportError

            self.run_shell(shell=interface)
        except ImportError:
            import code
            # Set up a dictionary to serve as the environment for the shell, so
            # that tab completion works on objects that are imported at runtime.
            # See ticket 5082.
            imported_objects = {}
            try: # Try activating rlcompleter, because it's handy.
                import readline
            except ImportError:
                pass
            else:
                # We don't have to wrap the following import in a 'try', because
                # we already know 'readline' was imported successfully.
                import rlcompleter
                readline.set_completer(rlcompleter.Completer(imported_objects).complete)
                readline.parse_and_bind("tab:complete")

            # We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow system
            # conventions and get $PYTHONSTARTUP first then .pythonrc.py.
            if not no_startup:
                for pythonrc in (os.environ.get("PYTHONSTARTUP"), '~/.pythonrc.py'):
                    if not pythonrc:
                        continue
                    pythonrc = os.path.expanduser(pythonrc)
                    if not os.path.isfile(pythonrc):
                        continue
                    try:
                        with open(pythonrc) as handle:
                            exec(compile(handle.read(), pythonrc, 'exec'), imported_objects)
                    except NameError:
                        pass
            code.interact(local=imported_objects)
alexcoman/labs | python/solutii/micu_matei/paranteze/paranteze.py | 5 | 1751 | #!/usr/bin/env python
# *-* coding: UTF-8 *-*
"""Tuxy scrie în fiecare zi foarte multe formule matematice.
Pentru că formulele sunt din ce în ce mai complicate trebuie
să folosească o serie de paranteze și a descoperit că cea
mai frecventă problemă a lui este că nu toate parantezele
sunt folosite cum trebuie.
Pentru acest lucru a apelat la ajutorul tău.
Câteva exemple:
- [] este bine
- []() este bine
- [()()] este bine
- ][ nu este bine
- (][][) nu este bine
- [)]()[(] nu este bine
"""
def get_end_paranteza(paranteza):
    """Return the closing counterpart of an opening bracket
    (None for anything else)."""
    perechi = {'(': ')', '[': ']', '{': '}'}
    return perechi.get(paranteza)
def este_corect(expresie):
    """Check that every bracket in *expresie* is properly matched.

    Non-bracket characters are ignored.  Returns False for a closing
    bracket with no matching opener, for mismatched pairs like "(]",
    and for unmatched openers left at the end (e.g. "(").
    """
    # Closing counterpart for each opener; kept local so a single pass
    # over the string suffices (the old version re-sliced the string for
    # every character, which was O(n^2)).
    inchidere = {'(': ')', '[': ']', '{': '}'}
    stiva = []
    for element in expresie:
        if element in "([{":
            stiva.append(element)
        elif element in ")]}":
            # Closer must match the most recent unclosed opener.
            if not stiva or inchidere[stiva[-1]] != element:
                return False
            stiva.pop()
    # Bug fix: leftover openers (e.g. "(") previously yielded True.
    return not stiva
if __name__ == "__main__":
    # Smoke tests: the first three expressions are balanced, the rest are not.
    assert este_corect("[()[]]"), "Probleme la expresia 1"
    assert este_corect("()()[][]"), "Probleme la expresia 2"
    assert este_corect("([([])])"), "Probleme la expresia 3"
    assert not este_corect("[)()()("), "Probleme la expresia 4"
    assert not este_corect("][[()][]"), "Probleme la expresia 5"
    assert not este_corect("([()]))"), "Probleme la expresia 6"
    assert not este_corect("([)]"), "Probleme la expresia 7"
| mit |
Piasy/proxy-searcher | site-packages/django/contrib/admin/actions.py | 98 | 3205 | """
Built-in, globally-available admin actions.
"""
from django.core.exceptions import PermissionDenied
from django.contrib.admin import helpers
from django.contrib.admin.util import get_deleted_objects, model_ngettext
from django.db import router
from django.template.response import TemplateResponse
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext_lazy, ugettext as _
def delete_selected(modeladmin, request, queryset):
    """
    Default action which deletes the selected objects.

    This action first displays a confirmation page which shows all the
    deletable objects or, if the user lacks permission on one of the related
    children (foreign keys), a "permission denied" message.

    Next, it deletes all selected objects and redirects back to the change list.
    """
    opts = modeladmin.model._meta
    app_label = opts.app_label

    # Check that the user has delete permission for the actual model
    if not modeladmin.has_delete_permission(request):
        raise PermissionDenied

    using = router.db_for_write(modeladmin.model)

    # Populate deletable_objects, a data structure of all related objects that
    # will also be deleted.
    deletable_objects, perms_needed, protected = get_deleted_objects(
        queryset, opts, request.user, modeladmin.admin_site, using)

    # The user has already confirmed the deletion.
    # Do the deletion and return a None to display the change list view again.
    if request.POST.get('post'):
        if perms_needed:
            raise PermissionDenied
        n = queryset.count()
        if n:
            # Log each deletion individually before the bulk delete.
            for obj in queryset:
                obj_display = force_unicode(obj)
                modeladmin.log_deletion(request, obj, obj_display)
            queryset.delete()
            modeladmin.message_user(request, _("Successfully deleted %(count)d %(items)s.") % {
                "count": n, "items": model_ngettext(modeladmin.opts, n)
            })
        # Return None to display the change list page again.
        return None

    # Pick singular/plural wording for the confirmation page.
    if len(queryset) == 1:
        objects_name = force_unicode(opts.verbose_name)
    else:
        objects_name = force_unicode(opts.verbose_name_plural)

    if perms_needed or protected:
        title = _("Cannot delete %(name)s") % {"name": objects_name}
    else:
        title = _("Are you sure?")

    context = {
        "title": title,
        "objects_name": objects_name,
        "deletable_objects": [deletable_objects],
        'queryset': queryset,
        "perms_lacking": perms_needed,
        "protected": protected,
        "opts": opts,
        "app_label": app_label,
        'action_checkbox_name': helpers.ACTION_CHECKBOX_NAME,
    }

    # Display the confirmation page, preferring the most specific template.
    return TemplateResponse(request, modeladmin.delete_selected_confirmation_template or [
        "admin/%s/%s/delete_selected_confirmation.html" % (app_label, opts.object_name.lower()),
        "admin/%s/delete_selected_confirmation.html" % app_label,
        "admin/delete_selected_confirmation.html"
    ], context, current_app=modeladmin.admin_site.name)

delete_selected.short_description = ugettext_lazy("Delete selected %(verbose_name_plural)s")
| mit |
teeple/pns_server | work/install/Python-2.7.4/Demo/tkinter/guido/dialog.py | 47 | 3202 | #! /usr/bin/env python
# A Python function that generates dialog boxes with a text message,
# optional bitmap, and any number of buttons.
# Cf. Ousterhout, Tcl and the Tk Toolkit, Figs. 27.2-3, pp. 269-270.
from Tkinter import *
import sys
def dialog(master, title, text, bitmap, default, *args):
    """Pop up a modal dialog with *text*, an optional *bitmap* and one
    button per remaining argument; block until a button (or <Return>,
    when default >= 0) is pressed and return that button's index."""
    # 1. Create the top-level window and divide it into top
    # and bottom parts.
    w = Toplevel(master, class_='Dialog')
    w.title(title)
    w.iconname('Dialog')

    top = Frame(w, relief=RAISED, borderwidth=1)
    top.pack(side=TOP, fill=BOTH)
    bot = Frame(w, relief=RAISED, borderwidth=1)
    bot.pack(side=BOTTOM, fill=BOTH)

    # 2. Fill the top part with the bitmap and message.
    msg = Message(top, width='3i', text=text,
                  font='-Adobe-Times-Medium-R-Normal-*-180-*')
    msg.pack(side=RIGHT, expand=1, fill=BOTH, padx='3m', pady='3m')
    if bitmap:
        bm = Label(top, bitmap=bitmap)
        bm.pack(side=LEFT, padx='3m', pady='3m')

    # 3. Create a row of buttons at the bottom of the dialog.
    # Each button sets the shared IntVar to its own index.
    var = IntVar()
    buttons = []
    i = 0
    for but in args:
        b = Button(bot, text=but, command=lambda v=var,i=i: v.set(i))
        buttons.append(b)
        if i == default:
            # The default button gets a sunken frame to stand out.
            bd = Frame(bot, relief=SUNKEN, borderwidth=1)
            bd.pack(side=LEFT, expand=1, padx='3m', pady='2m')
            b.lift()
            b.pack (in_=bd, side=LEFT,
                    padx='2m', pady='2m', ipadx='2m', ipady='1m')
        else:
            b.pack (side=LEFT, expand=1,
                    padx='3m', pady='3m', ipadx='2m', ipady='1m')
        i = i+1

    # 4. Set up a binding for <Return>, if there's a default,
    # set a grab, and claim the focus too.
    if default >= 0:
        w.bind('<Return>',
               lambda e, b=buttons[default], v=var, i=default:
               (b.flash(),
                v.set(i)))
    oldFocus = w.focus_get()
    w.grab_set()
    w.focus_set()

    # 5. Wait for the user to respond, then restore the focus
    # and return the index of the selected button.
    w.waitvar(var)
    w.destroy()
    if oldFocus: oldFocus.focus_set()
    return var.get()
# The rest is the test program.
def go():
    # Demo callback: shows two sample dialogs and prints the index of the
    # pressed button.  NOTE(review): Python 2 only (print statements).
    i = dialog(mainWidget,
               'Not Responding',
               "The file server isn't responding right now; "
               "I'll keep trying.",
               '',
               -1,
               'OK')
    print 'pressed button', i
    i = dialog(mainWidget,
               'File Modified',
               'File "tcl.h" has been modified since '
               'the last time it was saved. '
               'Do you want to save it before exiting the application?',
               'warning',
               0,
               'Save File',
               'Discard Changes',
               'Return To Editor')
    print 'pressed button', i
def test():
    # Build a tiny driver window with a start button (opens the demo
    # dialogs) and an exit button, then enter the Tk main loop.
    import sys
    global mainWidget
    mainWidget = Frame()
    Pack.config(mainWidget)
    start = Button(mainWidget, text='Press Here To Start', command=go)
    start.pack()
    endit = Button(mainWidget, text="Exit", command=sys.exit)
    endit.pack(fill=BOTH)
    mainWidget.mainloop()
if __name__ == '__main__':
test()
| gpl-2.0 |
40223234/2015cdb_g1_0134 | static/Brython3.1.1-20150328-091302/Lib/fnmatch.py | 894 | 3163 | """Filename matching with shell patterns.
fnmatch(FILENAME, PATTERN) matches according to the local convention.
fnmatchcase(FILENAME, PATTERN) always takes case in account.
The functions operate by translating the pattern into a regular
expression. They cache the compiled regular expressions for speed.
The function translate(PATTERN) returns a regular expression
corresponding to PATTERN. (It does not compile it.)
"""
import os
import posixpath
import re
import functools
__all__ = ["filter", "fnmatch", "fnmatchcase", "translate"]
def fnmatch(name, pat):
    """Test whether FILENAME matches PATTERN.

    Patterns are Unix shell style:

    *       matches everything
    ?       matches any single character
    [seq]   matches any character in seq
    [!seq]  matches any char not in seq

    An initial period in FILENAME is not special.
    Both FILENAME and PATTERN are first case-normalized
    if the operating system requires it.
    If you don't want this, use fnmatchcase(FILENAME, PATTERN).
    """
    # Normalize both sides for the host OS, then delegate to the
    # case-sensitive matcher.
    return fnmatchcase(os.path.normcase(name), os.path.normcase(pat))
@functools.lru_cache(maxsize=256, typed=True)
def _compile_pattern(pat):
    """Compile *pat* (str or bytes) and return the compiled regex's
    bound match method; results are cached per pattern and type."""
    if isinstance(pat, bytes):
        # Regexes are built from text: round-trip through Latin-1, which
        # maps every byte 1:1.
        res = bytes(translate(str(pat, 'ISO-8859-1')), 'ISO-8859-1')
    else:
        res = translate(pat)
    return re.compile(res).match
# NOTE: intentionally shadows the builtin filter() — historical public API.
def filter(names, pat):
    """Return the subset of the list NAMES that match PAT."""
    result = []
    pat = os.path.normcase(pat)
    match = _compile_pattern(pat)
    # Two separate loops so the per-name normcase call can be skipped
    # entirely on posix, where it is a no-op.
    if os.path is posixpath:
        # normcase on posix is NOP. Optimize it away from the loop.
        for name in names:
            if match(name):
                result.append(name)
    else:
        for name in names:
            if match(os.path.normcase(name)):
                result.append(name)
    return result
def fnmatchcase(name, pat):
    """Test whether FILENAME matches PATTERN, including case.

    This is a version of fnmatch() which doesn't case-normalize
    its arguments.
    """
    return _compile_pattern(pat)(name) is not None
def translate(pat):
    """Translate a shell PATTERN to a regular expression.

    There is no way to quote meta-characters.
    """
    parts = []
    pos, length = 0, len(pat)
    while pos < length:
        ch = pat[pos]
        pos += 1
        if ch == '*':
            parts.append('.*')
        elif ch == '?':
            parts.append('.')
        elif ch == '[':
            # Scan for the closing ']'; a leading '!' (negation) and an
            # immediately following ']' are part of the set, not its end.
            end = pos
            if end < length and pat[end] == '!':
                end += 1
            if end < length and pat[end] == ']':
                end += 1
            while end < length and pat[end] != ']':
                end += 1
            if end >= length:
                # Unterminated set: treat '[' literally.
                parts.append('\\[')
            else:
                inner = pat[pos:end].replace('\\', '\\\\')
                pos = end + 1
                if inner[0] == '!':
                    inner = '^' + inner[1:]
                elif inner[0] == '^':
                    inner = '\\' + inner
                parts.append('[%s]' % inner)
        else:
            parts.append(re.escape(ch))
    # \Z anchors at end-of-string; (?ms) keeps '.' matching newlines.
    return ''.join(parts) + '\\Z(?ms)'
| gpl-3.0 |
jyotikamboj/container | django/contrib/auth/management/__init__.py | 9 | 6413 | """
Creates permissions for all installed apps that need permissions.
"""
from __future__ import unicode_literals
import getpass
import unicodedata
from django.apps import apps
from django.contrib.auth import get_permission_codename
from django.core import exceptions
from django.core.management.base import CommandError
from django.db import DEFAULT_DB_ALIAS, router
from django.utils.encoding import DEFAULT_LOCALE_ENCODING
from django.utils import six
def _get_all_permissions(opts, ctype):
    """
    Returns (codename, name) for all permissions in the given opts.
    """
    auto_perms = _get_builtin_permissions(opts)
    declared_perms = list(opts.permissions)
    # Raises CommandError on duplicate/clashing codenames.
    _check_permission_clashing(declared_perms, auto_perms, ctype)
    return auto_perms + declared_perms
def _get_builtin_permissions(opts):
    """
    Returns (codename, name) for all autogenerated permissions.
    By default, this is ('add', 'change', 'delete')
    """
    return [
        (get_permission_codename(action, opts),
         'Can %s %s' % (action, opts.verbose_name_raw))
        for action in opts.default_permissions
    ]
def _check_permission_clashing(custom, builtin, ctype):
"""
Check that permissions for a model do not clash. Raises CommandError if
there are duplicate permissions.
"""
pool = set()
builtin_codenames = set(p[0] for p in builtin)
for codename, _name in custom:
if codename in pool:
raise CommandError(
"The permission codename '%s' is duplicated for model '%s.%s'." %
(codename, ctype.app_label, ctype.model_class().__name__))
elif codename in builtin_codenames:
raise CommandError(
"The permission codename '%s' clashes with a builtin permission "
"for model '%s.%s'." %
(codename, ctype.app_label, ctype.model_class().__name__))
pool.add(codename)
def create_permissions(app_config, verbosity=2, interactive=True, using=DEFAULT_DB_ALIAS, **kwargs):
    """Create missing Permission rows for every model in *app_config*,
    skipping apps without models and databases that disallow migration."""
    if not app_config.models_module:
        return

    try:
        Permission = apps.get_model('auth', 'Permission')
    except LookupError:
        return

    if not router.allow_migrate(using, Permission):
        return

    from django.contrib.contenttypes.models import ContentType

    # This will hold the permissions we're looking for as
    # (content_type, (codename, name))
    searched_perms = list()
    # The codenames and ctypes that should exist.
    ctypes = set()
    for klass in app_config.get_models():
        # Force looking up the content types in the current database
        # before creating foreign keys to them.
        ctype = ContentType.objects.db_manager(using).get_for_model(klass)
        ctypes.add(ctype)
        for perm in _get_all_permissions(klass._meta, ctype):
            searched_perms.append((ctype, perm))

    # Find all the Permissions that have a content_type for a model we're
    # looking for. We don't need to check for codenames since we already have
    # a list of the ones we're going to create.
    all_perms = set(Permission.objects.using(using).filter(
        content_type__in=ctypes,
    ).values_list(
        "content_type", "codename"
    ))

    # Only instantiate permissions that don't exist yet.
    perms = [
        Permission(codename=codename, name=name, content_type=ct)
        for ct, (codename, name) in searched_perms
        if (ct.pk, codename) not in all_perms
    ]
    # Validate the permissions before bulk_creation to avoid cryptic
    # database error when the verbose_name is longer than 50 characters
    permission_name_max_length = Permission._meta.get_field('name').max_length
    verbose_name_max_length = permission_name_max_length - 11 # len('Can change ') prefix
    for perm in perms:
        if len(perm.name) > permission_name_max_length:
            raise exceptions.ValidationError(
                "The verbose_name of %s is longer than %s characters" % (
                    perm.content_type,
                    verbose_name_max_length,
                )
            )
    Permission.objects.using(using).bulk_create(perms)
    if verbosity >= 2:
        for perm in perms:
            print("Adding permission '%s'" % perm)
def get_system_username():
    """
    Try to determine the current system user's username.

    :returns: The username as a unicode string, or an empty string if the
        username could not be determined.
    """
    try:
        result = getpass.getuser()
    except (ImportError, KeyError):
        # KeyError will be raised by os.getpwuid() (called by getuser())
        # if there is no corresponding entry in the /etc/passwd file
        # (a very restricted chroot environment, for example).
        return ''
    if six.PY2:
        # On Python 2 getuser() returns bytes; decode with the locale
        # encoding so callers always get unicode.
        try:
            result = result.decode(DEFAULT_LOCALE_ENCODING)
        except UnicodeDecodeError:
            # UnicodeDecodeError - preventive treatment for non-latin Windows.
            return ''
    return result
def get_default_username(check_db=True):
    """
    Try to determine the current system user's username to use as a default.

    :param check_db: If ``True``, requires that the username does not match an
        existing ``auth.User`` (otherwise returns an empty string).
    :returns: The username, or an empty string if no username can be
        determined.
    """
    # This file is used in apps.py, it should not trigger models import.
    from django.contrib.auth import models as auth_app

    # If the User model has been swapped out, we can't make any assumptions
    # about the default user name.
    if auth_app.User._meta.swapped:
        return ''

    default_username = get_system_username()
    try:
        # Strip accents/spaces and lowercase so the result is a plausible
        # ASCII username.
        default_username = (unicodedata.normalize('NFKD', default_username)
                            .encode('ascii', 'ignore').decode('ascii')
                            .replace(' ', '').lower())
    except UnicodeDecodeError:
        return ''

    # Run the username validator
    try:
        auth_app.User._meta.get_field('username').run_validators(default_username)
    except exceptions.ValidationError:
        return ''

    # Don't return the default username if it is already taken.
    if check_db and default_username:
        try:
            auth_app.User._default_manager.get(username=default_username)
        except auth_app.User.DoesNotExist:
            pass
        else:
            return ''
    return default_username
| mit |
JDRomano2/VenomKB | venomkb/archive/scripts/add_go_data.py | 1 | 1105 | import json
from tqdm import tqdm
from venomkb_builder import VenomKB
VKB = VenomKB()
VKB.load_database()

# Collect structured GO annotations per protein, keyed by venomkb_id.
go_annotations_out = {}
for x in tqdm(VKB.proteins):
    try:
        toxprot = VKB.get_record_from_toxprot(x.venomkb_id, 'dbReference', json=False)
    except:
        # NOTE(review): bare except silently skips any failing record —
        # consider narrowing and logging.
        continue
    # Keep only dbReference entries whose type attribute is 'GO'.
    go_annotations = [y for y in toxprot if ('type', 'GO') in y.items()]
    this_protein = []
    for go in go_annotations:
        current = {}
        # The GO identifier lives in the element's 'id' attribute.
        go_id = [z[1] for z in go.items() if z[0] == 'id'][0]
        for prop in go:
            # Child properties become type -> value pairs.
            dict_form = dict(prop.items())
            current[dict_form['type']] = dict_form['value']
        current['id'] = go_id
        # append to temporary list of structured go_annotations
        this_protein.append(current)
    # push to global list of go_annotations
    go_annotations_out[x.venomkb_id] = this_protein
'''
for vkbid, annot_list in tqdm(go_annotations_out.iteritems()):
VKB.add_to_existing(vkbid=vkbid,
new_key='go_annotations',
new_value=annot_list,
replace_if_exist=True)
'''
| gpl-2.0 |
tesb/flask-crystal | venv/Lib/ntpath.py | 127 | 18457 | # Module 'ntpath' -- common operations on WinNT/Win95 pathnames
"""Common pathname manipulations, WindowsNT/95 version.
Instead of importing this module directly, import os and refer to this
module as os.path.
"""
import os
import sys
import stat
import genericpath
import warnings
from genericpath import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime", "islink","exists","lexists","isdir","isfile",
"ismount","walk","expanduser","expandvars","normpath","abspath",
"splitunc","curdir","pardir","sep","pathsep","defpath","altsep",
"extsep","devnull","realpath","supports_unicode_filenames","relpath"]
# strings representing various path-related bits and pieces
curdir = '.'
pardir = '..'
extsep = '.'
sep = '\\'
pathsep = ';'
altsep = '/'
defpath = '.;C:\\bin'
# Adjust defaults for Windows CE and OS/2 (VACPP) builds, detected via the
# interpreter's builtin module list.
if 'ce' in sys.builtin_module_names:
    defpath = '\\Windows'
elif 'os2' in sys.builtin_module_names:
    # OS/2 w/ VACPP
    altsep = '/'
devnull = 'nul'
# Normalize the case of a pathname and map slashes to backslashes.
# Other normalizations (such as optimizing '../' away) are not done
# (this is done by normpath).
def normcase(s):
    """Normalize case of pathname.

    Makes all characters lowercase and all slashes into backslashes."""
    # lower() first, then map forward slashes; order doesn't matter since
    # '/' has no case.
    return s.lower().replace("/", "\\")
# Return whether a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.
# For DOS it is absolute if it starts with a slash or backslash (current
# volume), or if a pathname after the volume letter and colon / UNC resource
# starts with a slash or backslash.
def isabs(s):
    """Test whether a path is absolute"""
    # Absolute means: after stripping any drive letter, the path starts
    # with a slash or backslash.
    rest = splitdrive(s)[1]
    return bool(rest) and rest[0] in '/\\'
# Join two (or more) paths.
def join(a, *p):
    """Join two or more pathname components, inserting "\\" as needed.
    If any component is an absolute path, all previous path components
    will be discarded."""
    path = a
    for b in p:
        b_wins = 0 # set to 1 iff b makes path irrelevant
        if path == "":
            b_wins = 1

        elif isabs(b):
            # This probably wipes out path so far. However, it's more
            # complicated if path begins with a drive letter:
            # 1. join('c:', '/a') == 'c:/a'
            # 2. join('c:/', '/a') == 'c:/a'
            # But
            # 3. join('c:/a', '/b') == '/b'
            # 4. join('c:', 'd:/') = 'd:/'
            # 5. join('c:/', 'd:/') = 'd:/'
            if path[1:2] != ":" or b[1:2] == ":":
                # Path doesn't start with a drive letter, or cases 4 and 5.
                b_wins = 1

            # Else path has a drive letter, and b doesn't but is absolute.
            elif len(path) > 3 or (len(path) == 3 and
                                   path[-1] not in "/\\"):
                # case 3
                b_wins = 1

        if b_wins:
            path = b
        else:
            # Join, and ensure there's a separator.
            assert len(path) > 0
            if path[-1] in "/\\":
                if b and b[0] in "/\\":
                    # Avoid a doubled separator.
                    path += b[1:]
                else:
                    path += b
            elif path[-1] == ":":
                # Drive-relative: "c:" + "a" -> "c:a", no separator.
                path += b
            elif b:
                if b[0] in "/\\":
                    path += b
                else:
                    path += "\\" + b
            else:
                # path is not empty and does not end with a backslash,
                # but b is empty; since, e.g., split('a/') produces
                # ('a', ''), it's best if join() adds a backslash in
                # this case.
                path += '\\'

    return path
# Split a path in a drive specification (a drive letter followed by a
# colon) and the path specification.
# It is always true that drivespec + pathspec == p
def splitdrive(p):
    """Split a pathname into a drive-letter prefix and the rest.

    Returns a 2-tuple ``(drive, path)``; either part may be the empty
    string.  ``drive + path`` always reconstructs the input.
    """
    if p[1:2] != ':':
        return '', p
    return p[:2], p[2:]
# Parse UNC paths
def splitunc(p):
    """Split a pathname into UNC mount point and relative path specifiers.
    Return a 2-tuple (unc, rest); either part may be empty.
    If unc is not empty, it has the form '//host/mount' (or similar
    using backslashes). unc+rest is always the input path.
    Paths containing drive letters never have an UNC part.
    """
    if p[1:2] == ':':
        return '', p # Drive letter present
    firstTwo = p[0:2]
    if firstTwo == '//' or firstTwo == '\\\\':
        # is a UNC path:
        # vvvvvvvvvvvvvvvvvvvv equivalent to drive letter
        # \\machine\mountpoint\directories...
        #           directory ^^^^^^^^^^^^^^^
        normp = normcase(p)
        # Find the separator ending the host name ...
        index = normp.find('\\', 2)
        if index == -1:
            ##raise RuntimeError, 'illegal UNC path: "' + p + '"'
            return ("", p)
        # ... then the one ending the share (mount point) name.
        index = normp.find('\\', index + 1)
        if index == -1:
            index = len(p)
        return p[:index], p[index:]
    return '', p
# Split a path in head (everything up to the last '/') and tail (the
# rest). After the trailing '/' is stripped, the invariant
# join(head, tail) == p holds.
# The resulting head won't end in '/' unless it is the root.
def split(p):
    """Split a pathname into (head, tail).

    *tail* is everything after the final slash; either part may be
    empty.  The drive prefix, if any, stays attached to *head*, and
    *head* keeps no trailing slashes unless it is the root itself.
    """
    drive, rest = splitdrive(p)
    # Walk backwards to the character just past the last slash.
    cut = len(rest)
    while cut and rest[cut - 1] not in '/\\':
        cut -= 1
    head, tail = rest[:cut], rest[cut:]
    # Drop trailing slashes from head -- unless head is nothing but
    # slashes (i.e. the root), in which case it is kept whole.
    head = head.rstrip('/\\') or head
    return drive + head, tail
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
    # Delegate to the shared generic implementation, passing this
    # module's separator constants (sep/altsep/extsep -- presumably
    # defined near the top of the module, outside this view).
    return genericpath._splitext(p, sep, altsep, extsep)
# Reuse the generic implementation's docstring so help() stays accurate.
splitext.__doc__ = genericpath._splitext.__doc__
# Return the tail (basename) part of a path.
def basename(p):
    """Return the final (tail) component of pathname *p*."""
    head, tail = split(p)
    return tail
# Return the head (dirname) part of a path.
def dirname(p):
    """Return the directory (head) component of pathname *p*."""
    head, tail = split(p)
    return head
# Is a path a symbolic link?
# This will always return false on systems where posix.lstat doesn't exist.
def islink(path):
    """Test for a symbolic link.

    Always returns False on this platform (WindowsNT/95 and OS/2 have
    no symlink support here).
    """
    return False
# alias exists to lexists
lexists = exists
# Is a path a mount point? Either a root (with or without drive letter)
# or an UNC path with at most a / or \ after the mount point.
def ismount(path):
"""Test whether a path is a mount point (defined as root of drive)"""
unc, rest = splitunc(path)
if unc:
return rest in ("", "/", "\\")
p = splitdrive(path)[1]
return len(p) == 1 and p[0] in '/\\'
# Directory tree walk.
# For each directory under top (including top itself, but excluding
# '.' and '..'), func(arg, dirname, filenames) is called, where
# dirname is the name of the directory and filenames is the list
# of files (and subdirectories etc.) in the directory.
# The func may modify the filenames list, to implement a filter,
# or to impose a different order of visiting.
def walk(top, func, arg):
    """Directory tree walk with callback function.
    For each directory in the directory tree rooted at top (including top
    itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
    dirname is the name of the directory, and fnames a list of the names of
    the files and subdirectories in dirname (excluding '.' and '..'). func
    may modify the fnames list in-place (e.g. via del or slice assignment),
    and walk will only recurse into the subdirectories whose names remain in
    fnames; this can be used to implement a filter, or to impose a specific
    order of visiting. No semantics are defined for, or required of, arg,
    beyond that arg is always passed to func. It can be used, e.g., to pass
    a filename pattern, or a mutable object designed to accumulate
    statistics. Passing None for arg is common."""
    warnings.warnpy3k("In 3.x, os.path.walk is removed in favor of os.walk.",
                      stacklevel=2)
    try:
        names = os.listdir(top)
    except os.error:
        # Unreadable directory: silently skip it (historical behaviour).
        return
    # Invoke the callback first (it may prune `names` in place), then
    # recurse into whichever subdirectories remain.
    func(arg, top, names)
    for name in names:
        name = join(top, name)
        if isdir(name):
            walk(name, func, arg)
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
def expanduser(path):
    """Expand ~ and ~user constructs.
    If user or $HOME is unknown, do nothing."""
    if path[:1] != '~':
        return path
    # Advance i past the '~user' prefix (up to the first path separator).
    i, n = 1, len(path)
    while i < n and path[i] not in '/\\':
        i = i + 1
    # Resolve the current user's home directory from the environment, in
    # order of preference: HOME, USERPROFILE, then HOMEDRIVE + HOMEPATH.
    if 'HOME' in os.environ:
        userhome = os.environ['HOME']
    elif 'USERPROFILE' in os.environ:
        userhome = os.environ['USERPROFILE']
    elif not 'HOMEPATH' in os.environ:
        return path
    else:
        try:
            drive = os.environ['HOMEDRIVE']
        except KeyError:
            drive = ''
        userhome = join(drive, os.environ['HOMEPATH'])
    if i != 1: #~user
        # '~other' resolves to a sibling of the current user's home dir.
        userhome = join(dirname(userhome), path[1:i])
    return userhome + path[i:]
# Expand paths containing shell variable substitutions.
# The following rules apply:
# - no expansion within single quotes
# - '$$' is translated into '$'
# - '%%' is translated into '%' if '%%' are not seen in %var1%%var2%
# - ${varname} is accepted.
# - $varname is accepted.
# - %varname% is accepted.
# - varnames can be made out of letters, digits and the characters '_-'
# (though is not verified in the ${varname} and %varname% cases)
# XXX With COMMAND.COM you can use any characters in a variable name,
# XXX except '^|<>='.
def expandvars(path):
    """Expand shell variables of the forms $var, ${var} and %var%.
    Unknown variables are left unchanged."""
    if '$' not in path and '%' not in path:
        return path
    import string
    varchars = string.ascii_letters + string.digits + '_-'
    res = ''
    index = 0
    pathlen = len(path)
    while index < pathlen:
        c = path[index]
        if c == '\'': # no expansion within single quotes
            # Copy everything through the closing quote verbatim.
            path = path[index + 1:]
            pathlen = len(path)
            try:
                index = path.index('\'')
                res = res + '\'' + path[:index + 1]
            except ValueError:
                res = res + path
                index = pathlen - 1
        elif c == '%': # variable or '%'
            if path[index + 1:index + 2] == '%':
                res = res + c
                index = index + 1
            else:
                path = path[index+1:]
                pathlen = len(path)
                try:
                    index = path.index('%')
                except ValueError:
                    # Unterminated %var: keep the literal text.
                    res = res + '%' + path
                    index = pathlen - 1
                else:
                    var = path[:index]
                    if var in os.environ:
                        res = res + os.environ[var]
                    else:
                        res = res + '%' + var + '%'
        elif c == '$': # variable or '$$'
            if path[index + 1:index + 2] == '$':
                res = res + c
                index = index + 1
            elif path[index + 1:index + 2] == '{':
                path = path[index+2:]
                pathlen = len(path)
                try:
                    index = path.index('}')
                    var = path[:index]
                    if var in os.environ:
                        res = res + os.environ[var]
                    else:
                        res = res + '${' + var + '}'
                except ValueError:
                    # Unterminated ${var: keep the literal text.
                    res = res + '${' + path
                    index = pathlen - 1
            else:
                # Bare $var: consume the longest run of varchars.
                var = ''
                index = index + 1
                c = path[index:index + 1]
                while c != '' and c in varchars:
                    var = var + c
                    index = index + 1
                    c = path[index:index + 1]
                if var in os.environ:
                    res = res + os.environ[var]
                else:
                    res = res + '$' + var
                if c != '':
                    index = index - 1
        else:
            res = res + c
        index = index + 1
    return res
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A\B.
# Previously, this function also truncated pathnames to 8+3 format,
# but as this module is called "ntpath", that's obviously wrong!
def normpath(path):
    """Normalize path, eliminating double slashes, etc."""
    # Preserve unicode (if path is unicode)
    backslash, dot = (u'\\', u'.') if isinstance(path, unicode) else ('\\', '.')
    if path.startswith(('\\\\.\\', '\\\\?\\')):
        # in the case of paths with these prefixes:
        # \\.\ -> device names
        # \\?\ -> literal paths
        # do not do any normalization, but return the path unchanged
        return path
    path = path.replace("/", "\\")
    prefix, path = splitdrive(path)
    # We need to be careful here. If the prefix is empty, and the path starts
    # with a backslash, it could either be an absolute path on the current
    # drive (\dir1\dir2\file) or a UNC filename (\\server\mount\dir1\file). It
    # is therefore imperative NOT to collapse multiple backslashes blindly in
    # that case.
    # The code below preserves multiple backslashes when there is no drive
    # letter. This means that the invalid filename \\\a\b is preserved
    # unchanged, where a\\\b is normalised to a\b. It's not clear that there
    # is any better behaviour for such edge cases.
    if prefix == '':
        # No drive letter - preserve initial backslashes
        while path[:1] == "\\":
            prefix = prefix + backslash
            path = path[1:]
    else:
        # We have a drive letter - collapse initial backslashes
        if path.startswith("\\"):
            prefix = prefix + backslash
            path = path.lstrip("\\")
    comps = path.split("\\")
    i = 0
    # Drop '.' and empty components; resolve '..' against the preceding
    # component where possible (a leading '..' at the root is discarded).
    while i < len(comps):
        if comps[i] in ('.', ''):
            del comps[i]
        elif comps[i] == '..':
            if i > 0 and comps[i-1] != '..':
                del comps[i-1:i+1]
                i -= 1
            elif i == 0 and prefix.endswith("\\"):
                del comps[i]
            else:
                i += 1
        else:
            i += 1
    # If the path is now empty, substitute '.'
    if not prefix and not comps:
        comps.append(dot)
    return prefix + backslash.join(comps)
# Return an absolute path.
# Pick an abspath implementation at import time: the native Windows API
# when available, otherwise a portable fallback built from this module's
# own primitives.
try:
    from nt import _getfullpathname
except ImportError: # not running on Windows - mock up something sensible
    def abspath(path):
        """Return the absolute version of a path."""
        if not isabs(path):
            # Anchor relative paths at the current working directory,
            # keeping unicode paths unicode.
            if isinstance(path, unicode):
                cwd = os.getcwdu()
            else:
                cwd = os.getcwd()
            path = join(cwd, path)
        return normpath(path)
else: # use native Windows method on Windows
    def abspath(path):
        """Return the absolute version of a path."""
        if path: # Empty path must return current working directory.
            try:
                path = _getfullpathname(path)
            except WindowsError:
                pass # Bad path - return unchanged.
        elif isinstance(path, unicode):
            path = os.getcwdu()
        else:
            path = os.getcwd()
        return normpath(path)
# realpath is a no-op on systems without islink support
realpath = abspath
# Win9x family and earlier have no Unicode filename support.
# Win9x family and earlier have no Unicode filename support; detect via
# the platform-ID field of sys.getwindowsversion() when it exists.
supports_unicode_filenames = (hasattr(sys, "getwindowsversion") and
                              sys.getwindowsversion()[3] >= 2)
def _abspath_split(path):
    # Helper for relpath(): normalize *path* to absolute form and split
    # it into (is_unc, unc-or-drive prefix, list of non-empty components).
    abs = abspath(normpath(path))
    prefix, rest = splitunc(abs)
    is_unc = bool(prefix)
    if not is_unc:
        prefix, rest = splitdrive(abs)
    return is_unc, prefix, [x for x in rest.split(sep) if x]
def relpath(path, start=curdir):
    """Return a relative version of a path"""
    if not path:
        raise ValueError("no path specified")
    start_is_unc, start_prefix, start_list = _abspath_split(start)
    path_is_unc, path_prefix, path_list = _abspath_split(path)
    # A relative form only exists when both paths share the same root
    # (same UNC share, or same drive letter -- compared case-insensitively).
    if path_is_unc ^ start_is_unc:
        raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
                         % (path, start))
    if path_prefix.lower() != start_prefix.lower():
        if path_is_unc:
            raise ValueError("path is on UNC root %s, start on UNC root %s"
                             % (path_prefix, start_prefix))
        else:
            raise ValueError("path is on drive %s, start on drive %s"
                             % (path_prefix, start_prefix))
    # Work out how much of the filepath is shared by start and path.
    i = 0
    for e1, e2 in zip(start_list, path_list):
        if e1.lower() != e2.lower():
            break
        i += 1
    # Climb out of start's unshared tail, then descend into path's.
    rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
    if not rel_list:
        return curdir
    return join(*rel_list)
try:
# The genericpath.isdir implementation uses os.stat and checks the mode
# attribute to tell whether or not the path is a directory.
# This is overkill on Windows - just pass the path to GetFileAttributes
# and check the attribute from there.
from nt import _isdir as isdir
except ImportError:
# Use genericpath.isdir as imported above.
pass
| apache-2.0 |
aecepoglu/twitchy-streamer-python | test/steps/all.py | 1 | 4759 |
from behave import *
from unittest.mock import patch, mock_open, MagicMock
from src import arg_parser as myArgParser, server_utils as myServerUtils, errors as myErrors
import io
import requests
import requests_mock
def setup_debug_on_error(userdata):
    # Cache the -D BEHAVE_DEBUG_ON_ERROR command-line flag in a module
    # global so the after_step hook can consult it cheaply.
    global BEHAVE_DEBUG_ON_ERROR
    BEHAVE_DEBUG_ON_ERROR = userdata.getbool("BEHAVE_DEBUG_ON_ERROR")
def before_all(context):
    # behave hook: runs once before the whole test run.
    setup_debug_on_error(context.config.userdata)
def after_step(context, step):
    """behave hook: drop into an ipdb post-mortem when a step fails.

    Only active when the run was started with -D BEHAVE_DEBUG_ON_ERROR
    (cached by setup_debug_on_error).  The previous version raised
    ``step.exc_traceback`` unconditionally at the top of the hook, which
    aborted every step -- passing ones included -- and made the debugger
    code below it unreachable.
    """
    if step.status == "failed" and BEHAVE_DEBUG_ON_ERROR:
        import ipdb
        ipdb.post_mortem(step.exc_traceback)
def before_scenario(context):
    # NOTE(review): when used from an environment file, behave invokes
    # this hook as before_scenario(context, scenario); with only one
    # parameter it would raise TypeError there -- confirm intended usage.
    pass
@given('a file at "{filepath}"')
def step_impl(context, filepath):
    # Stack a fake file onto the mocked filesystem: open()/os.path.isfile
    # answer for `filepath` (content = the step's docstring text) and fall
    # back to any previously installed mock, so multiple "Given a file"
    # steps compose within one scenario.
    previousOpenMock = None
    previousIsfileMock = None
    content = context.text
    if "openMock" in context:
        previousOpenMock = context.openMock
    if "isfileMock" in context:
        previousIsfileMock = context.isfileMock
    def my_open(filename, openOpt="r"):
        assert(openOpt == "r")
        if (filename == filepath):
            return io.StringIO(content)
        elif previousOpenMock:
            return previousOpenMock(filename, openOpt)
        else:
            raise FileNotFoundError(filename)
    def my_isfile(x):
        if (x == filepath):
            return True
        elif previousIsfileMock:
            return previousIsfileMock(x)
        else:
            return False
    context.openMock = my_open
    context.isfileMock = my_isfile
@given('a directory at "{path}" exists')
def step_impl(context, path):
    # Same chaining pattern as above, but for os.path.isdir.
    previousMock = None
    if "isdirMock" in context:
        previousMock = context.isdirMock
    def my_isdir(x):
        if (x == path):
            return True
        elif previousMock:
            return previousMock(x)
        else:
            return False
    context.isdirMock = my_isdir
@given('program arguments are "{args}"')
def step_impl(context, args):
    # Whitespace-split the quoted string into an argv-style list.
    context.cmdArgs = args.split()
@when('it parses arguments')
def step_impl(context):
    # Fill in default (always-False) mocks for anything earlier steps did
    # not set up, then run the parser under the fake filesystem patches.
    # A MyError raised by the parser is captured for later "then" steps.
    if "openMock" not in context:
        context.openMock = True
    if "isfileMock" not in context:
        context.isfileMock = MagicMock(return_value = False)
    if "isdirMock" not in context:
        context.isdirMock = MagicMock(return_value = False)
    with patch("builtins.open", context.openMock):
        with patch("os.path.isfile", context.isfileMock):
            with patch("os.path.isdir", context.isdirMock):
                try:
                    context.parsedArgs = myArgParser.parse(context.cmdArgs)
                    context.raisedException = False
                except myErrors.MyError as err:
                    context.raisedException = err
@then('config has "{pattern}" in "{targetList}" list')
def step_impl(context, pattern, targetList):
    # Assert the parsed config holds a list at targetList containing pattern.
    assert(targetList in context.parsedArgs)
    assert(isinstance(context.parsedArgs[targetList], list))
    assert(pattern in context.parsedArgs[targetList])
@then('config has "{value}" at "{target}"')
def step_impl(context, value, target):
    assert(target in context.parsedArgs)
    assert(context.parsedArgs[target] == value)
@then('config has "{target}"')
def step_impl(context, target):
    assert(target in context.parsedArgs)
@when('I start the program')
def step_impl(context):
    # Placeholder step: deliberately unimplemented.
    raise NotImplementedError('STEP: When I start the program')
@given('I entered no arguments')
def step_impl(context):
    context.cmdArgs = []
    pass
@then('it should fail')
def step_impl(context):
    # "when it parses arguments" stored the raised MyError (or False) here.
    assert(context.raisedException)
@then('it should prompt a link to download one')
def step_impl(context):
    assert("can find a sample config file at" in str(context.raisedException))
@then('it should say configs must contain publishLink')
def step_impl(context):
    assert("publishLink" in str(context.raisedException))
@given(u'a server at "{host}" responds to "{method}" "{path}" with "{responseText}"')
def step_impl(context, host, method, path, responseText):
_addr = host + path
_responseText = responseText
_method = method
previousRegistrar = None
if "registerRequestsMock" in context:
previousRegistrar = context.registerRequestsMock
def fun(mocker):
mocker.register_uri(_method, _addr, text = _responseText)
if previousRegistrar:
previousRegistrar()
context.registerRequestsMock = fun
@given(u'my config has "{value}" as "{key}"')
def step_impl(context, value, key):
if "myConfig" not in context:
context.myConfig = {}
context.myConfig[key] = value
@when(u'it checks for version')
def step_impl(context):
try:
with requests_mock.Mocker() as m:
context.registerRequestsMock(m)
myServerUtils.check(context.myConfig)
context.raisedException = False
except myErrors.MyError as err:
context.raisedException = err
@then(u'it should succeed')
def step_impl(context):
assert(not context.raisedException)
@then(u'it should notify me about the newer version')
def step_impl(context):
#TODO I need to verify this somehow
pass
@then(u'give me version incompatibility error')
def step_impl(context):
assert("no longer supported" in str(context.raisedException))
| mit |
evanson/yowsup | yowsup/layers/protocol_contacts/protocolentities/notification_contact_update.py | 68 | 1295 |
from yowsup.structs import ProtocolTreeNode
from .notification_contact import ContactNotificationProtocolEntity
class UpdateContactNotificationProtocolEntity(ContactNotificationProtocolEntity):
    '''
    Notification that a contact's record was updated.  Wire format:

    <notification offline="0" id="{{NOTIFICATION_ID}}" notify="{{NOTIFY_NAME}}" type="contacts"
        t="{{TIMESTAMP}}" from="{{SENDER_JID}}">
        <update jid="{{SET_JID}}"> </update>
    </notification>
    '''
    def __init__(self, _id, _from, timestamp, notify, offline, contactJid):
        super(UpdateContactNotificationProtocolEntity, self).__init__(_id, _from, timestamp, notify, offline)
        self.setData(contactJid)
    def setData(self, jid):
        # jid of the contact whose record was updated
        self.contactJid = jid
    def toProtocolTreeNode(self):
        # Extend the base notification node with an <update jid="..."/> child.
        node = super(UpdateContactNotificationProtocolEntity, self).toProtocolTreeNode()
        removeNode = ProtocolTreeNode("update", {"jid": self.contactJid}, None, None)
        node.addChild(removeNode)
        return node
@staticmethod
def fromProtocolTreeNode(node):
entity = ContactNotificationProtocolEntity.fromProtocolTreeNode(node)
entity.__class__ = UpdateContactNotificationProtocolEntity
removeNode = node.getChild("update")
entity.setData(removeNode.getAttributeValue("jid"))
        return entity
| gpl-3.0 |
TarasRudnyk/scrapy | scrapy/linkextractors/htmlparser.py | 90 | 2883 |
"""
HTMLParser-based link extractor
"""
import warnings
from six.moves.html_parser import HTMLParser
from six.moves.urllib.parse import urljoin
from w3lib.url import safe_url_string
from scrapy.link import Link
from scrapy.utils.python import unique as unique_list
from scrapy.exceptions import ScrapyDeprecationWarning
class HtmlParserLinkExtractor(HTMLParser):
    # Deprecated link extractor built on the stdlib HTMLParser; kept only
    # for backwards compatibility (see the warning emitted in __init__).
    def __init__(self, tag="a", attr="href", process=None, unique=False):
        HTMLParser.__init__(self)
        warnings.warn(
            "HtmlParserLinkExtractor is deprecated and will be removed in "
            "future releases. Please use scrapy.linkextractors.LinkExtractor",
            ScrapyDeprecationWarning, stacklevel=2,
        )
        # tag/attr/process may be callables (predicate/transformer) or
        # plain values, which get wrapped into equality/identity lambdas.
        self.scan_tag = tag if callable(tag) else lambda t: t == tag
        self.scan_attr = attr if callable(attr) else lambda a: a == attr
        self.process_attr = process if callable(process) else lambda v: v
        self.unique = unique
    def _extract_links(self, response_text, response_url, response_encoding):
        self.reset()
        self.feed(response_text)
        self.close()
        # Optionally de-duplicate by URL, then absolutize each link
        # against any <base href> seen while parsing.
        links = unique_list(self.links, key=lambda link: link.url) if self.unique else self.links
        ret = []
        base_url = urljoin(response_url, self.base_url) if self.base_url else response_url
        for link in links:
            if isinstance(link.url, unicode):  # Python 2 text type
                link.url = link.url.encode(response_encoding)
            try:
                link.url = urljoin(base_url, link.url)
            except ValueError:
                # Malformed URL: drop this link.
                continue
            link.url = safe_url_string(link.url, response_encoding)
            link.text = link.text.decode(response_encoding)
            ret.append(link)
        return ret
    def extract_links(self, response):
        # wrapper needed to allow to work directly with text
        return self._extract_links(response.body, response.url, response.encoding)
    def reset(self):
        # Clear parser state between documents.
        HTMLParser.reset(self)
        self.base_url = None
        self.current_link = None
        self.links = []
    def handle_starttag(self, tag, attrs):
        if tag == 'base':
            # Remember <base href=...> for later URL absolutization.
            self.base_url = dict(attrs).get('href')
        if self.scan_tag(tag):
            for attr, value in attrs:
                if self.scan_attr(attr):
                    url = self.process_attr(value)
                    link = Link(url=url)
                    self.links.append(link)
                    self.current_link = link
    def handle_endtag(self, tag):
        if self.scan_tag(tag):
            self.current_link = None
    def handle_data(self, data):
        # Accumulate text nodes into the currently open link's anchor text.
        if self.current_link:
            self.current_link.text = self.current_link.text + data
    def matches(self, url):
        """This extractor matches with any url, since
        it doesn't contain any patterns"""
        return True
| bsd-3-clause |
amisrs/angular-flask | angular_flask/lib/python2.7/site-packages/flask/helpers.py | 776 | 33793 |
# -*- coding: utf-8 -*-
"""
flask.helpers
~~~~~~~~~~~~~
Implements various helpers.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import pkgutil
import posixpath
import mimetypes
from time import time
from zlib import adler32
from threading import RLock
from werkzeug.routing import BuildError
from functools import update_wrapper
try:
from werkzeug.urls import url_quote
except ImportError:
from urlparse import quote as url_quote
from werkzeug.datastructures import Headers
from werkzeug.exceptions import NotFound
# this was moved in 0.7
try:
from werkzeug.wsgi import wrap_file
except ImportError:
from werkzeug.utils import wrap_file
from jinja2 import FileSystemLoader
from .signals import message_flashed
from .globals import session, _request_ctx_stack, _app_ctx_stack, \
current_app, request
from ._compat import string_types, text_type
# sentinel
# Unique marker object used to distinguish "no value supplied" from an
# explicit None argument.
_missing = object()
# what separators does this operating system provide that are not a slash?
# this is used by the send_from_directory function to ensure that nobody is
# able to access files from outside the filesystem.
_os_alt_seps = list(sep for sep in [os.path.sep, os.path.altsep]
                    if sep not in (None, '/'))
def _endpoint_from_view_func(view_func):
"""Internal helper that returns the default endpoint for a given
function. This always is the function name.
"""
assert view_func is not None, 'expected view func if endpoint ' \
'is not provided.'
return view_func.__name__
def stream_with_context(generator_or_function):
    """Request contexts disappear when the response is started on the server.
    This is done for efficiency reasons and to make it less likely to encounter
    memory leaks with badly written WSGI middlewares.  The downside is that if
    you are using streamed responses, the generator cannot access request bound
    information any more.

    This function however can help you keep the context around for longer::

        from flask import stream_with_context, request, Response

        @app.route('/stream')
        def streamed_response():
            @stream_with_context
            def generate():
                yield 'Hello '
                yield request.args['name']
                yield '!'
            return Response(generate())

    Alternatively it can also be used around a specific generator::

        from flask import stream_with_context, request, Response

        @app.route('/stream')
        def streamed_response():
            def generate():
                yield 'Hello '
                yield request.args['name']
                yield '!'
            return Response(stream_with_context(generate()))

    .. versionadded:: 0.9
    """
    try:
        gen = iter(generator_or_function)
    except TypeError:
        # Used as a decorator on a generator *function*: defer wrapping
        # until the function is actually called.
        def decorator(*args, **kwargs):
            gen = generator_or_function()
            return stream_with_context(gen)
        return update_wrapper(decorator, generator_or_function)
    def generator():
        ctx = _request_ctx_stack.top
        if ctx is None:
            raise RuntimeError('Attempted to stream with context but '
                'there was no context in the first place to keep around.')
        with ctx:
            # Dummy sentinel. Has to be inside the context block or we're
            # not actually keeping the context around.
            yield None
            # The try/finally is here so that if someone passes a WSGI level
            # iterator in we're still running the cleanup logic. Generators
            # don't need that because they are closed on their destruction
            # automatically.
            try:
                for item in gen:
                    yield item
            finally:
                if hasattr(gen, 'close'):
                    gen.close()
    # The trick is to start the generator. Then the code execution runs until
    # the first dummy None is yielded at which point the context was already
    # pushed. This item is discarded. Then when the iteration continues the
    # real generator is executed.
    wrapped_g = generator()
    next(wrapped_g)
    return wrapped_g
def make_response(*args):
    """Coerce view-function style return values into a response object.

    With no arguments an empty response object is created; with a single
    argument that value is passed to :meth:`flask.Flask.make_response`;
    with several arguments they are forwarded together as a tuple (e.g.
    ``(body, status)`` or ``(body, status, headers)``).

    Handy when you need an actual response object to attach headers to::

        def index():
            response = make_response(render_template('index.html', foo=42))
            response.headers['X-Parachutes'] = 'parachutes are cool'
            return response

    Also useful to force the return value of a decorated view function
    into a real response object.

    .. versionadded:: 0.6
    """
    if not args:
        return current_app.response_class()
    rv = args[0] if len(args) == 1 else args
    return current_app.make_response(rv)
def url_for(endpoint, **values):
    """Generates a URL to the given endpoint with the method provided.

    Variable arguments that are unknown to the target endpoint are appended
    to the generated URL as query arguments.  If the value of a query argument
    is `None`, the whole pair is skipped.  In case blueprints are active
    you can shortcut references to the same blueprint by prefixing the
    local endpoint with a dot (``.``).

    This will reference the index function local to the current blueprint::

        url_for('.index')

    For more information, head over to the :ref:`Quickstart <url-building>`.

    To integrate applications, :class:`Flask` has a hook to intercept URL build
    errors through :attr:`Flask.build_error_handler`.  The `url_for` function
    results in a :exc:`~werkzeug.routing.BuildError` when the current app does
    not have a URL for the given endpoint and values.  When it does, the
    :data:`~flask.current_app` calls its :attr:`~Flask.build_error_handler` if
    it is not `None`, which can return a string to use as the result of
    `url_for` (instead of `url_for`'s default to raise the
    :exc:`~werkzeug.routing.BuildError` exception) or re-raise the exception.
    An example::

        def external_url_handler(error, endpoint, **values):
            "Looks up an external URL when `url_for` cannot build a URL."
            # This is an example of hooking the build_error_handler.
            # Here, lookup_url is some utility function you've built
            # which looks up the endpoint in some external URL registry.
            url = lookup_url(endpoint, **values)
            if url is None:
                # External lookup did not have a URL.
                # Re-raise the BuildError, in context of original traceback.
                exc_type, exc_value, tb = sys.exc_info()
                if exc_value is error:
                    raise exc_type, exc_value, tb
                else:
                    raise error
            # url_for will use this result, instead of raising BuildError.
            return url

        app.build_error_handler = external_url_handler

    Here, `error` is the instance of :exc:`~werkzeug.routing.BuildError`, and
    `endpoint` and `**values` are the arguments passed into `url_for`.  Note
    that this is for building URLs outside the current application, and not for
    handling 404 NotFound errors.

    .. versionadded:: 0.10
       The `_scheme` parameter was added.

    .. versionadded:: 0.9
       The `_anchor` and `_method` parameters were added.

    .. versionadded:: 0.9
       Calls :meth:`Flask.handle_build_error` on
       :exc:`~werkzeug.routing.BuildError`.

    :param endpoint: the endpoint of the URL (name of the function)
    :param values: the variable arguments of the URL rule
    :param _external: if set to `True`, an absolute URL is generated. Server
      address can be changed via `SERVER_NAME` configuration variable which
      defaults to `localhost`.
    :param _scheme: a string specifying the desired URL scheme. The `_external`
      parameter must be set to `True` or a `ValueError` is raised.
    :param _anchor: if provided this is added as anchor to the URL.
    :param _method: if provided this explicitly specifies an HTTP method.
    """
    appctx = _app_ctx_stack.top
    reqctx = _request_ctx_stack.top
    if appctx is None:
        raise RuntimeError('Attempted to generate a URL without the '
                           'application context being pushed. This has to be '
                           'executed when application context is available.')
    # If request specific information is available we have some extra
    # features that support "relative" urls.
    if reqctx is not None:
        url_adapter = reqctx.url_adapter
        blueprint_name = request.blueprint
        if not reqctx.request._is_old_module:
            if endpoint[:1] == '.':
                # A leading '.' means "endpoint local to the current
                # blueprint" (or app-level when there is no blueprint).
                if blueprint_name is not None:
                    endpoint = blueprint_name + endpoint
                else:
                    endpoint = endpoint[1:]
        else:
            # TODO: get rid of this deprecated functionality in 1.0
            if '.' not in endpoint:
                if blueprint_name is not None:
                    endpoint = blueprint_name + '.' + endpoint
            elif endpoint.startswith('.'):
                endpoint = endpoint[1:]
        external = values.pop('_external', False)
    # Otherwise go with the url adapter from the appctx and make
    # the urls external by default.
    else:
        url_adapter = appctx.url_adapter
        if url_adapter is None:
            raise RuntimeError('Application was not able to create a URL '
                               'adapter for request independent URL generation. '
                               'You might be able to fix this by setting '
                               'the SERVER_NAME config variable.')
        external = values.pop('_external', True)
    # Reserved keyword arguments are popped before the remaining values
    # become URL parameters / query args.
    anchor = values.pop('_anchor', None)
    method = values.pop('_method', None)
    scheme = values.pop('_scheme', None)
    appctx.app.inject_url_defaults(endpoint, values)
    if scheme is not None:
        if not external:
            raise ValueError('When specifying _scheme, _external must be True')
        url_adapter.url_scheme = scheme
    try:
        rv = url_adapter.build(endpoint, values, method=method,
                               force_external=external)
    except BuildError as error:
        # We need to inject the values again so that the app callback can
        # deal with that sort of stuff.
        values['_external'] = external
        values['_anchor'] = anchor
        values['_method'] = method
        return appctx.app.handle_url_build_error(error, endpoint, values)
    if anchor is not None:
        rv += '#' + url_quote(anchor)
    return rv
def get_template_attribute(template_name, attribute):
    """Return a macro (or variable) that a template exports.

    This lets Python code invoke a template macro directly.  Given a
    template `_cider.html` containing ``{% macro hello(name) %}...``::

        hello = get_template_attribute('_cider.html', 'hello')
        return hello('World')

    .. versionadded:: 0.2

    :param template_name: the name of the template
    :param attribute: the name of the variable of macro to access
    """
    template = current_app.jinja_env.get_template(template_name)
    return getattr(template.module, attribute)
def flash(message, category='message'):
    """Flashes a message to the next request.  In order to remove the
    flashed message from the session and to display it to the user,
    the template has to call :func:`get_flashed_messages`.

    .. versionchanged:: 0.3
       `category` parameter added.

    :param message: the message to be flashed.
    :param category: the category for the message.  The following values
                     are recommended: ``'message'`` for any kind of message,
                     ``'error'`` for errors, ``'info'`` for information
                     messages and ``'warning'`` for warnings.  However any
                     kind of string can be used as category.
    """
    # Original implementation:
    #
    #     session.setdefault('_flashes', []).append((category, message))
    #
    # This assumed that changes made to mutable structures in the session
    # are always in sync with the session object, which is not true for
    # session implementations that use external storage for keeping their
    # keys/values -- hence the explicit read-modify-write below.
    flashes = session.get('_flashes', [])
    flashes.append((category, message))
    session['_flashes'] = flashes
    # Notify subscribers (e.g. test helpers) that a message was flashed.
    message_flashed.send(current_app._get_current_object(),
                         message=message, category=category)
def get_flashed_messages(with_categories=False, category_filter=[]):
    """Pulls all flashed messages from the session and returns them.
    Further calls in the same request to the function will return
    the same messages.  By default just the messages are returned,
    but when `with_categories` is set to `True`, the return value will
    be a list of tuples in the form ``(category, message)`` instead.

    Filter the flashed messages to one or more categories by providing those
    categories in `category_filter`.  This allows rendering categories in
    separate html blocks.  The `with_categories` and `category_filter`
    arguments are distinct:

    * `with_categories` controls whether categories are returned with message
      text (`True` gives a tuple, where `False` gives just the message text).
    * `category_filter` filters the messages down to only those matching the
      provided categories.

    See :ref:`message-flashing-pattern` for examples.

    .. versionchanged:: 0.3
       `with_categories` parameter added.

    .. versionchanged:: 0.9
        `category_filter` parameter added.

    :param with_categories: set to `True` to also receive categories.
    :param category_filter: whitelist of categories to limit return values
    """
    ctx = _request_ctx_stack.top
    msgs = ctx.flashes
    if msgs is None:
        # First access this request: drain the session and cache on the
        # request context so repeated calls return the same messages.
        msgs = session.pop('_flashes') if '_flashes' in session else []
        ctx.flashes = msgs
    if category_filter:
        msgs = [m for m in msgs if m[0] in category_filter]
    if not with_categories:
        return [m[1] for m in msgs]
    return msgs
def send_file(filename_or_fp, mimetype=None, as_attachment=False,
              attachment_filename=None, add_etags=True,
              cache_timeout=None, conditional=False):
    """Sends the contents of a file to the client.

    This will use the most efficient method available and configured.  By
    default it will try to use the WSGI server's file_wrapper support.
    Alternatively you can set the application's
    :attr:`~Flask.use_x_sendfile` attribute to ``True`` to directly emit an
    `X-Sendfile` header.  This however requires support of the underlying
    webserver for `X-Sendfile`.

    By default it will try to guess the mimetype for you, but you can
    also explicitly provide one.  For extra security you probably want
    to send certain files as attachment (HTML for instance).  The mimetype
    guessing requires a `filename` or an `attachment_filename` to be
    provided.

    Please never pass filenames to this function from user sources without
    checking them first.  Something like this is usually sufficient to
    avoid security problems::

        if '..' in filename or filename.startswith('/'):
            abort(404)

    .. versionadded:: 0.2

    .. versionadded:: 0.5
       The `add_etags`, `cache_timeout` and `conditional` parameters were
       added.  The default behavior is now to attach etags.

    .. versionchanged:: 0.7
       mimetype guessing and etag support for file objects was
       deprecated because it was unreliable.  Pass a filename if you are
       able to, otherwise attach an etag yourself.  This functionality
       will be removed in Flask 1.0

    .. versionchanged:: 0.9
       cache_timeout pulls its default from application config, when None.

    :param filename_or_fp: the filename of the file to send.  This is
                           relative to the :attr:`~Flask.root_path` if a
                           relative path is specified.
                           Alternatively a file object might be provided
                           in which case `X-Sendfile` might not work and
                           fall back to the traditional method.  Make sure
                           that the file pointer is positioned at the start
                           of data to send before calling :func:`send_file`.
    :param mimetype: the mimetype of the file if provided, otherwise
                     auto detection happens.
    :param as_attachment: set to `True` if you want to send this file with
                          a ``Content-Disposition: attachment`` header.
    :param attachment_filename: the filename for the attachment if it
                                differs from the file's filename.
    :param add_etags: set to `False` to disable attaching of etags.
    :param conditional: set to `True` to enable conditional responses.
    :param cache_timeout: the timeout in seconds for the headers. When `None`
                          (default), this value is set by
                          :meth:`~Flask.get_send_file_max_age` of
                          :data:`~flask.current_app`.
    """
    mtime = None
    # A string argument is a path; anything else is treated as an open
    # file object (in which case `filename` is only a best-effort guess).
    if isinstance(filename_or_fp, string_types):
        filename = filename_or_fp
        file = None
    else:
        from warnings import warn
        file = filename_or_fp
        filename = getattr(file, 'name', None)
        # XXX: this behavior is now deprecated because it was unreliable.
        # removed in Flask 1.0
        if not attachment_filename and not mimetype \
           and isinstance(filename, string_types):
            warn(DeprecationWarning('The filename support for file objects '
                'passed to send_file is now deprecated. Pass an '
                'attach_filename if you want mimetypes to be guessed.'),
                stacklevel=2)
        if add_etags:
            warn(DeprecationWarning('In future flask releases etags will no '
                'longer be generated for file objects passed to the send_file '
                'function because this behavior was unreliable. Pass '
                'filenames instead if possible, otherwise attach an etag '
                'yourself based on another value'), stacklevel=2)
    # Relative paths are resolved against the application root.
    if filename is not None:
        if not os.path.isabs(filename):
            filename = os.path.join(current_app.root_path, filename)
    if mimetype is None and (filename or attachment_filename):
        mimetype = mimetypes.guess_type(filename or attachment_filename)[0]
    if mimetype is None:
        mimetype = 'application/octet-stream'
    headers = Headers()
    if as_attachment:
        if attachment_filename is None:
            if filename is None:
                raise TypeError('filename unavailable, required for '
                                'sending as attachment')
            attachment_filename = os.path.basename(filename)
        headers.add('Content-Disposition', 'attachment',
                    filename=attachment_filename)
    # X-Sendfile delegates the actual transfer to the webserver; no body
    # data is produced here, so any open file object can be closed.
    if current_app.use_x_sendfile and filename:
        if file is not None:
            file.close()
        headers['X-Sendfile'] = filename
        headers['Content-Length'] = os.path.getsize(filename)
        data = None
    else:
        # Traditional path: stream the file through the WSGI file wrapper.
        if file is None:
            file = open(filename, 'rb')
            mtime = os.path.getmtime(filename)
            headers['Content-Length'] = os.path.getsize(filename)
        data = wrap_file(request.environ, file)
    rv = current_app.response_class(data, mimetype=mimetype, headers=headers,
                                    direct_passthrough=True)
    # if we know the file modification date, we can store it as
    # the time of the last modification.
    if mtime is not None:
        rv.last_modified = int(mtime)
    rv.cache_control.public = True
    if cache_timeout is None:
        cache_timeout = current_app.get_send_file_max_age(filename)
    if cache_timeout is not None:
        rv.cache_control.max_age = cache_timeout
        rv.expires = int(time() + cache_timeout)
    # Etag derived from mtime, size and an adler32 of the path -- cheap,
    # but only possible when we actually know the filename.
    if add_etags and filename is not None:
        rv.set_etag('flask-%s-%s-%s' % (
            os.path.getmtime(filename),
            os.path.getsize(filename),
            adler32(
                filename.encode('utf-8') if isinstance(filename, text_type)
                else filename
            ) & 0xffffffff
        ))
        if conditional:
            rv = rv.make_conditional(request)
            # make sure we don't send x-sendfile for servers that
            # ignore the 304 status code for x-sendfile.
            if rv.status_code == 304:
                rv.headers.pop('x-sendfile', None)
    return rv
def safe_join(directory, filename):
    """Safely join `directory` and `filename`.

    Example usage::

        @app.route('/wiki/<path:filename>')
        def wiki_page(filename):
            filename = safe_join(app.config['WIKI_FOLDER'], filename)
            with open(filename, 'rb') as fd:
                content = fd.read()  # Read and process the file content...

    :param directory: the base directory.
    :param filename: the untrusted filename relative to that directory.
    :raises: :class:`~werkzeug.exceptions.NotFound` if the resulting path
             would fall out of `directory`.
    """
    normalized = posixpath.normpath(filename)
    # Reject platform-specific alternative separators (e.g. '\\' on
    # Windows) which normpath does not collapse.
    if any(sep in normalized for sep in _os_alt_seps):
        raise NotFound()
    # After normalization the only remaining escapes are an absolute
    # path or a leading parent-directory reference.
    if os.path.isabs(normalized) or \
       normalized == '..' or \
       normalized.startswith('../'):
        raise NotFound()
    return os.path.join(directory, normalized)
def send_from_directory(directory, filename, **options):
    """Send a file from a given directory with :func:`send_file`.  This
    is a secure way to quickly expose static files from an upload folder
    or something similar.

    Example usage::

        @app.route('/uploads/<path:filename>')
        def download_file(filename):
            return send_from_directory(app.config['UPLOAD_FOLDER'],
                                       filename, as_attachment=True)

    .. admonition:: Sending files and Performance

       It is strongly recommended to activate either `X-Sendfile` support in
       your webserver or (if no authentication happens) to tell the webserver
       to serve files for the given path on its own without calling into the
       web application for improved performance.

    .. versionadded:: 0.5

    :param directory: the directory where all the files are stored.
    :param filename: the filename relative to that directory to
                     download.
    :param options: optional keyword arguments that are directly
                    forwarded to :func:`send_file`.
    """
    # safe_join raises NotFound for any path that would escape `directory`.
    resolved = safe_join(directory, filename)
    if not os.path.isfile(resolved):
        raise NotFound()
    # Conditional (304) responses are on by default for static files.
    options.setdefault('conditional', True)
    return send_file(resolved, **options)
def get_root_path(import_name):
    """Return the filesystem directory of a package, or of the folder that
    contains a module, falling back to the current working directory when
    the name cannot be located.

    Not to be confused with the package path returned by :func:`find_package`.
    """
    # Fast path: the module is already imported and knows its file.
    module = sys.modules.get(import_name)
    if module is not None and hasattr(module, '__file__'):
        return os.path.dirname(os.path.abspath(module.__file__))

    # Otherwise ask the import system for a loader.  No loader, or an
    # interactive/main module without one, means we can only guess:
    # use the current working directory.
    loader = pkgutil.get_loader(import_name)
    if loader is None or import_name == '__main__':
        return os.getcwd()

    if hasattr(loader, 'get_filename'):
        source_file = loader.get_filename(import_name)
    else:
        # Some loaders (e.g. zipimporter before Python 2.7) lack
        # get_filename; import the module and read __file__ instead.
        __import__(import_name)
        source_file = sys.modules[import_name].__file__

    # source_file is module.py for a module, or __init__.py for a package.
    return os.path.dirname(os.path.abspath(source_file))
def find_package(import_name):
    """Finds a package and returns the prefix (or None if the package is
    not installed) as well as the folder that contains the package or
    module as a tuple.  The package path returned is the module that would
    have to be added to the pythonpath in order to make it possible to
    import the module.  The prefix is the path below which a UNIX like
    folder structure exists (lib, share etc.).
    """
    root_mod_name = import_name.split('.')[0]
    loader = pkgutil.get_loader(root_mod_name)
    if loader is None or import_name == '__main__':
        # import name is not found, or interactive/main module
        package_path = os.getcwd()
    else:
        # For .egg, zipimporter does not have get_filename until Python 2.7.
        if hasattr(loader, 'get_filename'):
            filename = loader.get_filename(root_mod_name)
        elif hasattr(loader, 'archive'):
            # zipimporter's loader.archive points to the .egg or .zip
            # archive filename is dropped in call to dirname below.
            filename = loader.archive
        else:
            # At least one loader is missing both get_filename and archive:
            # Google App Engine's HardenedModulesHook
            #
            # Fall back to imports.
            __import__(import_name)
            filename = sys.modules[import_name].__file__
        package_path = os.path.abspath(os.path.dirname(filename))
        # BUG FIX: this check must only run when a loader exists; it
        # previously sat outside the else-branch and raised
        # AttributeError ("NoneType has no attribute is_package") for
        # unknown import names and for '__main__'.
        # package_path ends with __init__.py for a package
        if loader.is_package(root_mod_name):
            package_path = os.path.dirname(package_path)
    site_parent, site_folder = os.path.split(package_path)
    py_prefix = os.path.abspath(sys.prefix)
    if package_path.startswith(py_prefix):
        return py_prefix, package_path
    elif site_folder.lower() == 'site-packages':
        parent, folder = os.path.split(site_parent)
        # Windows like installations
        if folder.lower() == 'lib':
            base_dir = parent
        # UNIX like installations
        elif os.path.basename(parent).lower() == 'lib':
            base_dir = os.path.dirname(parent)
        else:
            base_dir = site_parent
        return base_dir, package_path
    return None, package_path
class locked_cached_property(object):
    """A decorator that converts a function into a lazy property.  The
    wrapped function is called at most once per instance; the computed
    result is stored in the instance ``__dict__`` under the property
    name so that later reads bypass the descriptor entirely.  Works like
    the one in Werkzeug but has a lock for thread safety.
    """

    def __init__(self, func, name=None, doc=None):
        self.__name__ = name or func.__name__
        self.__module__ = func.__module__
        self.__doc__ = doc or func.__doc__
        self.func = func
        self.lock = RLock()

    def __get__(self, obj, type=None):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        with self.lock:
            try:
                # Another thread may have computed and stored the value
                # while we were waiting on the lock.
                return obj.__dict__[self.__name__]
            except KeyError:
                result = self.func(obj)
                obj.__dict__[self.__name__] = result
                return result
class _PackageBoundObject(object):
    """Base class for objects bound to a Python package or module
    (applications and blueprints): resolves the root path from the import
    name and exposes template and static-file helpers relative to it.
    """

    def __init__(self, import_name, template_folder=None):
        #: The name of the package or module.  Do not change this once
        #: it was set by the constructor.
        self.import_name = import_name
        #: location of the templates.  `None` if templates should not be
        #: exposed.
        self.template_folder = template_folder
        #: Where is the app root located?
        self.root_path = get_root_path(self.import_name)
        # Backing fields for the static_folder / static_url_path
        # properties below.
        self._static_folder = None
        self._static_url_path = None

    # The getter/setter pairs below are turned into properties and then
    # deleted from the class namespace so only the property names remain.
    def _get_static_folder(self):
        # Returns an absolute path, or None when no static folder is set.
        if self._static_folder is not None:
            return os.path.join(self.root_path, self._static_folder)
    def _set_static_folder(self, value):
        self._static_folder = value
    static_folder = property(_get_static_folder, _set_static_folder)
    del _get_static_folder, _set_static_folder

    def _get_static_url_path(self):
        # Defaults to '/<basename of the static folder>' unless an
        # explicit URL path was assigned.
        if self._static_url_path is None:
            if self.static_folder is None:
                return None
            return '/' + os.path.basename(self.static_folder)
        return self._static_url_path
    def _set_static_url_path(self, value):
        self._static_url_path = value
    static_url_path = property(_get_static_url_path, _set_static_url_path)
    del _get_static_url_path, _set_static_url_path

    @property
    def has_static_folder(self):
        """This is `True` if the package bound object's container has a
        folder named ``'static'``.

        .. versionadded:: 0.5
        """
        return self.static_folder is not None

    @locked_cached_property
    def jinja_loader(self):
        """The Jinja loader for this package bound object.

        .. versionadded:: 0.5
        """
        if self.template_folder is not None:
            return FileSystemLoader(os.path.join(self.root_path,
                                                 self.template_folder))

    def get_send_file_max_age(self, filename):
        """Provides default cache_timeout for the :func:`send_file` functions.

        By default, this function returns ``SEND_FILE_MAX_AGE_DEFAULT`` from
        the configuration of :data:`~flask.current_app`.

        Static file functions such as :func:`send_from_directory` use this
        function, and :func:`send_file` calls this function on
        :data:`~flask.current_app` when the given cache_timeout is `None`. If a
        cache_timeout is given in :func:`send_file`, that timeout is used;
        otherwise, this method is called.

        This allows subclasses to change the behavior when sending files based
        on the filename.  For example, to set the cache timeout for .js files
        to 60 seconds::

            class MyFlask(flask.Flask):
                def get_send_file_max_age(self, name):
                    if name.lower().endswith('.js'):
                        return 60
                    return flask.Flask.get_send_file_max_age(self, name)

        .. versionadded:: 0.9
        """
        return current_app.config['SEND_FILE_MAX_AGE_DEFAULT']

    def send_static_file(self, filename):
        """Function used internally to send static files from the static
        folder to the browser.

        .. versionadded:: 0.5
        """
        if not self.has_static_folder:
            raise RuntimeError('No static folder for this object')
        # Ensure get_send_file_max_age is called in all cases.
        # Here, we ensure get_send_file_max_age is called for Blueprints.
        cache_timeout = self.get_send_file_max_age(filename)
        return send_from_directory(self.static_folder, filename,
                                   cache_timeout=cache_timeout)

    def open_resource(self, resource, mode='rb'):
        """Opens a resource from the application's resource folder.  To see
        how this works, consider the following folder structure::

            /myapplication.py
            /schema.sql
            /static
                /style.css
            /templates
                /layout.html
                /index.html

        If you want to open the `schema.sql` file you would do the
        following::

            with app.open_resource('schema.sql') as f:
                contents = f.read()
                do_something_with(contents)

        :param resource: the name of the resource.  To access resources within
                         subfolders use forward slashes as separator.
        :param mode: resource file opening mode, default is 'rb'.
        """
        if mode not in ('r', 'rb'):
            raise ValueError('Resources can only be opened for reading')
        return open(os.path.join(self.root_path, resource), mode)
| mit |
arthurprs/aerospike-client-python | examples/admin/grant_roles.py | 2 | 3922 | # -*- coding: utf-8 -*-
################################################################################
# Copyright 2013-2015 Aerospike, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from __future__ import print_function
import aerospike
import sys
from optparse import OptionParser
################################################################################
# Options Parsing
################################################################################
################################################################################
# Options Parsing
################################################################################

usage = "usage: %prog [options]"

optparser = OptionParser(usage=usage, add_help_option=False)

optparser.add_option(
    "--help", dest="help", action="store_true",
    help="Displays this message.")

optparser.add_option(
    "-h", "--host", dest="host", type="string", default="127.0.0.1", metavar="<ADDRESS>",
    help="Address of Aerospike server.")

optparser.add_option(
    "-p", "--port", dest="port", type="int", default=3000, metavar="<PORT>",
    help="Port of the Aerospike server.")

optparser.add_option(
    "-U", "--username", dest="username", type="string", metavar="<USERNAME>",
    help="Username to connect to database.")

optparser.add_option(
    "-P", "--password", dest="password", type="string", metavar="<PASSWORD>",
    help="Password to connect to database.")

(options, args) = optparser.parse_args()

if options.help:
    optparser.print_help()
    print()
    sys.exit(1)

# Security operations require credentials; bail out early if missing.
# (`is None` instead of `== None`: identity test is the correct idiom.)
if options.username is None or options.password is None:
    optparser.print_help()
    print()
    sys.exit(1)

################################################################################
# Client Configuration
################################################################################

config = {
    'hosts': [ (options.host, options.port) ]
}

################################################################################
# Application
################################################################################

exitCode = 0

try:

    # ----------------------------------------------------------------------------
    # Connect to Cluster
    # ----------------------------------------------------------------------------

    client = aerospike.client(config).connect(options.username, options.password)

    # ----------------------------------------------------------------------------
    # Perform Operation
    # ----------------------------------------------------------------------------

    try:
        user = "foo-example"
        roles = ["read-write", "user-admin"]
        client.admin_grant_roles(user, roles)
        print("OK, new roles granted to 1 user")
    except Exception as e:
        print("error: {0}".format(e), file=sys.stderr)
        print("In case of invalid user first create user by running create_user.py")
        exitCode = 2

    # ----------------------------------------------------------------------------
    # Close Connection to Cluster
    # ----------------------------------------------------------------------------

    client.close()

# `except Exception, eargs` was Python-2-only syntax and a SyntaxError on
# Python 3 even though the file already imports print_function; the
# `as` form works on Python 2.6+ and Python 3.
except Exception as eargs:
    print("error: {0}".format(eargs), file=sys.stderr)
    exitCode = 3

################################################################################
# Exit
################################################################################

sys.exit(exitCode)
| apache-2.0 |
walteryang47/ovirt-engine | packaging/setup/plugins/ovirt-engine-common/base/core/postinstall.py | 8 | 3048 | #
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""post install config file plugin."""
import gettext
from otopi import constants as otopicons
from otopi import common, filetransaction, plugin, util
from ovirt_engine_setup import constants as osetupcons
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
@util.export
class Plugin(plugin.PluginBase):
    """post install config file plugin.

    At INIT time it enables post-install file generation; at MISC time it
    collects every setup attribute flagged with ``postinstallfile`` and
    writes the resulting key/value pairs through a file transaction.
    """

    def __init__(self, context):
        super(Plugin, self).__init__(context=context)

    @plugin.event(
        stage=plugin.Stages.STAGE_INIT,
    )
    def _init(self):
        # Default to generating the post-install file; other plugins may
        # flip this flag off before STAGE_MISC runs.
        self.environment[osetupcons.CoreEnv.GENERATE_POSTINSTALL] = True

    @plugin.event(
        stage=plugin.Stages.STAGE_MISC,
        priority=plugin.Stages.PRIORITY_LAST,
        condition=lambda self: self.environment[
            osetupcons.CoreEnv.GENERATE_POSTINSTALL
        ],
    )
    def _misc(self):
        self.logger.info(
            _("Generating post install configuration file '{name}'").format(
                name=osetupcons.FileLocations.OVIRT_SETUP_POST_INSTALL_CONFIG,
            )
        )
        content = ['[environment:default]']
        # Gather all attribute descriptors registered by the setup
        # constant modules.
        consts = []
        for constobj in self.environment[
            osetupcons.CoreEnv.SETUP_ATTRS_MODULES
        ]:
            consts.extend(constobj.__dict__['__osetup_attrs__'])
        for c in consts:
            for k in c.__dict__.values():
                if hasattr(k, '__osetup_attrs__'):
                    if k.__osetup_attrs__['postinstallfile']:
                        # k is a property descriptor; fget(None) yields the
                        # environment key it represents.
                        k = k.fget(None)
                        if k in self.environment:
                            v = self.environment[k]
                            # Serialize as key=type:value; list values are
                            # joined with newlines.
                            content.append(
                                '%s=%s:%s' % (
                                    k,
                                    common.typeName(v),
                                    '\n'.join(v) if isinstance(v, list)
                                    else v,
                                )
                            )
        # Register the write as a transaction so it is rolled back on
        # failure and recorded in the modified-files list.
        self.environment[otopicons.CoreEnv.MAIN_TRANSACTION].append(
            filetransaction.FileTransaction(
                name=osetupcons.FileLocations.OVIRT_SETUP_POST_INSTALL_CONFIG,
                content=content,
                modifiedList=self.environment[
                    otopicons.CoreEnv.MODIFIED_FILES
                ],
            )
        )
# vim: expandtab tabstop=4 shiftwidth=4
| apache-2.0 |
minhnd/youtube-subtitle-downloader | youtubesub.py | 1 | 5521 | # -*- coding: utf-8 -*-
"""
Youtube Subtitle Downloader downloads subtitles from Youtube videos
(if those are present) and convert them to SRT format.
Usage: youtubesub.py [-h] [-l] [--language LANGUAGE] [--filename FILENAME]
[--filetype {srt,xml}]
url
positional arguments:
url URL of the Youtube video
optional arguments:
-h, --help show this help message and exit
-l, --list list all available languages
--language LANGUAGE the ISO language code
--filename FILENAME specify the name of subtitle
--filetype {srt,xml} specify the output type of subtitle
Example:
python youtubesub.py --filename subtitle --language en http://www.youtube.com/watch?v=5MgBikgcWnY
:copyright: (c) 2014 by Nguyen Dang Minh (www.minhnd.com)
:license: BSD, see LICENSE for more details.
"""
import urllib2
import urlparse
import argparse
import sys
import xml.etree.ElementTree as ET
class YoutubeSubDownloader():
video_id = None
subtitle = None
languages = {}
def __init__(self, url=None):
self.video_id = self.extractVideoID(url)
self.languages = self.getAvailableLanguages()
if self.languages == {}:
print "There's no subtitle"
sys.exit()
def extractVideoID(self, url=None):
"""
Examples:
- http://youtu.be/5MgBikgcWnY
- http://www.youtube.com/watch?v=5MgBikgcWnY&feature=feed
- http://www.youtube.com/embed/5MgBikgcWnY
- http://www.youtube.com/v/5MgBikgcWnY?version=3&hl=en_US
"""
url_data = urlparse.urlparse(url)
if url_data.hostname == 'youtu.be':
return url_data.path[1:]
if url_data.hostname in ('www.youtube.com', 'youtube.com'):
if url_data.path == '/watch':
query = urlparse.parse_qs(url_data.query)
return query['v'][0]
if url_data.path[:7] == '/embed/':
return url_data.path.split('/')[2]
if url_data.path[:3] == '/v/':
return url_data.path.split('/')[2]
return None
def download(self, language, filename, filetype):
"""Download subtitle of the selected language"""
if language not in self.languages.keys():
print "Theres's no subtitle in this language"
sys.exit()
url = "http://www.youtube.com/api/timedtext?v={0}&lang={1}".format(self.video_id, language)
self.subtitle = urllib2.urlopen(url)
if filetype == "srt":
self.writeSRTFile(filename)
else:
self.writeXMLFile(filename)
def getAvailableLanguages(self):
"""Get all available languages of subtitle"""
url = "http://www.youtube.com/api/timedtext?v=%s&type=list" % self.video_id
xml = urllib2.urlopen(url)
tree = ET.parse(xml)
root = tree.getroot()
languages = {}
for child in root:
languages[child.attrib["lang_code"]] = child.attrib["lang_translated"]
return languages
def list(self):
"""List all available languages of subtitle"""
for key, value in self.languages.iteritems():
print key, value
def writeXMLFile(self, filename=None):
with open(filename + ".xml", 'w') as f:
for line in self.subtitle:
f.write(line)
def writeSRTFile(self, filename=None):
tree = ET.parse(self.subtitle)
root = tree.getroot()
with open(filename + ".srt", 'w') as f:
line = 1
for child in root:
f.write(self.printSRTLine(line, child.attrib["start"], child.attrib["dur"], child.text.encode('utf-8')))
line += 1
def formatSRTTime(self, secTime):
"""Convert a time in seconds (in Google's subtitle) to SRT time format"""
sec, micro = str(secTime).split('.')
m, s = divmod(int(sec), 60)
h, m = divmod(m, 60)
return "{:02}:{:02}:{:02},{}".format(h,m,s,micro)
def printSRTLine(self, line, start, duration, text):
"""Print a subtitle in SRT format"""
end = self.formatSRTTime(float(start) + float(duration))
start = self.formatSRTTime(start)
text = self.convertHTML(text)
return "{}\n{} --> {}\n{}\n\n".format(line, start, end, text)
def convertHTML(self, text):
"""A few HTML encodings replacements.
' to '
"""
return text.replace(''', "'")
def main():
    """Parse command-line arguments and download the requested subtitle.

    Python 2 script entry point; all errors are printed rather than
    propagated so the tool never shows a traceback to the user.
    """
    try:
        parser = argparse.ArgumentParser(description="Youtube Subtitle Downloader")
        parser.add_argument("url", help="URL of the Youtube video")
        parser.add_argument("-l", "--list", action="store_true", help="list all available languages")
        parser.add_argument("--language", default="en", help="the ISO language code")
        parser.add_argument("--filename", default="subtitle", help="specify the name of subtitle")
        parser.add_argument("--filetype", default="srt", choices=["srt", "xml"], help="specify the output type of subtitle")
        args = parser.parse_args()
        # Constructing the downloader already fetches the language list
        # (and exits if the video has no subtitles).
        downloader = YoutubeSubDownloader(args.url)
        if args.list:
            print "Available languages:"
            # NOTE(review): list() prints and returns None, so binding the
            # result to f is pointless; also -l still proceeds to download
            # below rather than exiting -- looks unintended, confirm.
            f = downloader.list()
        downloader.download(args.language, args.filename, args.filetype)
    except Exception as e:
        print e

if __name__ == '__main__':
    main()
| bsd-2-clause |
xclxxl414/rqalpha | rqalpha/mod/rqalpha_mod_sys_risk/price_validator.py | 3 | 1892 | # -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rqalpha.interface import AbstractFrontendValidator
from rqalpha.const import ORDER_TYPE
from rqalpha.utils.i18n import gettext as _
class PriceValidator(AbstractFrontendValidator):
    """Front-end order validator that rejects limit orders priced outside
    the instrument's daily limit-up/limit-down band."""

    def __init__(self, env):
        self._env = env

    def can_submit_order(self, account, order):
        # Only limit orders carry a price to validate.
        if order.type != ORDER_TYPE.LIMIT:
            return True

        board = self._env.price_board

        limit_up = board.get_limit_up(order.order_book_id)
        if order.price > limit_up:
            message = _(
                "Order Rejected: limit order price {limit_price} is higher than limit up {limit_up}."
            ).format(limit_price=order.price, limit_up=limit_up)
            order.mark_rejected(message)
            return False

        limit_down = board.get_limit_down(order.order_book_id)
        if order.price < limit_down:
            message = _(
                "Order Rejected: limit order price {limit_price} is lower than limit down {limit_down}."
            ).format(limit_price=order.price, limit_down=limit_down)
            order.mark_rejected(message)
            return False

        return True

    def can_cancel_order(self, account, order):
        # Cancellation is never constrained by price limits.
        return True
| apache-2.0 |
tcharding/kubernetes | hack/verify-flags-underscore.py | 205 | 4659 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import os
import re
import sys
# Module-level CLI parsing: an optional explicit file list; when empty,
# main() falls back to scanning the whole repository tree.
parser = argparse.ArgumentParser()
parser.add_argument("filenames", help="list of files to check, all files if unspecified", nargs='*')
args = parser.parse_args()
# Cargo culted from http://stackoverflow.com/questions/898669/how-can-i-detect-if-a-file-is-binary-non-text-in-python
# Cargo culted from http://stackoverflow.com/questions/898669/how-can-i-detect-if-a-file-is-binary-non-text-in-python
def is_binary(pathname):
    """Return True if the given file looks binary (or cannot be read).

    The file is scanned chunk by chunk for a NUL byte.  Any error while
    opening, reading or decoding the file (missing file, permissions,
    undecodable bytes) also yields True so callers simply skip the file.

    @attention: found @ http://bytes.com/topic/python/answers/21222-determine-file-type-binary-text on 6/08/2010
    @author: Trent Mick <TrentM@ActiveState.com>
    @author: Jorge Orpinel <jorge@orpinel.com>"""
    try:
        with open(pathname, 'r') as f:
            CHUNKSIZE = 1024
            while True:
                chunk = f.read(CHUNKSIZE)
                if '\0' in chunk:  # found null byte
                    return True
                if len(chunk) < CHUNKSIZE:
                    break  # done
    # Was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; Exception keeps the "treat unreadable files as
    # binary" behavior without masking interpreter-exit signals.
    except Exception:
        return True
    return False
def get_all_files(rootdir):
    """Walk *rootdir* and return every readable text file, skipping
    vendored/generated trees, .git, and BUILD files."""
    pruned_dirs = ('vendor', 'staging', '_output', '_gopath',
                   'third_party', '.git', '.make')
    collected = []
    for root, dirs, files in os.walk(rootdir):
        # Prune in place so os.walk never descends into these trees.
        dirs[:] = [d for d in dirs if d not in pruned_dirs]
        for name in files:
            if name == 'BUILD':
                continue
            pathname = os.path.join(root, name)
            if is_binary(pathname):
                continue
            collected.append(pathname)
    return collected
# Collects all the flags used in golang files and verifies the flags do
# not contain underscore. If any flag needs to be excluded from this check,
# need to add that flag in hack/verify-flags/excluded-flags.txt.
def check_underscore_in_flags(rootdir, files):
    """Scan Go sources in *files* for flag declarations whose names use
    underscores and are not whitelisted; print offenders and exit(1)."""
    # preload the 'known' flags which don't follow the - standard
    pathname = os.path.join(rootdir, "hack/verify-flags/excluded-flags.txt")
    f = open(pathname, 'r')
    excluded_flags = set(f.read().splitlines())
    f.close()
    # Patterns matching the flag-name argument of pflag/flag declarations
    # (Var, String, Int, Bool, Duration, StringSlice and their ...P forms);
    # group 1 captures the flag name.
    regexs = [ re.compile('Var[P]?\([^,]*, "([^"]*)"'),
               re.compile('.String[P]?\("([^"]*)",[^,]+,[^)]+\)'),
               re.compile('.Int[P]?\("([^"]*)",[^,]+,[^)]+\)'),
               re.compile('.Bool[P]?\("([^"]*)",[^,]+,[^)]+\)'),
               re.compile('.Duration[P]?\("([^"]*)",[^,]+,[^)]+\)'),
               re.compile('.StringSlice[P]?\("([^"]*)",[^,]+,[^)]+\)') ]
    new_excluded_flags = set()
    # walk all the files looking for any flags being declared
    for pathname in files:
        if not pathname.endswith(".go"):
            continue
        f = open(pathname, 'r')
        data = f.read()
        f.close()
        matches = []
        for regex in regexs:
            matches = matches + regex.findall(data)
        for flag in matches:
            # Substring match: a flag containing any excluded name passes.
            if any(x in flag for x in excluded_flags):
                continue
            if "_" in flag:
                new_excluded_flags.add(flag)
    if len(new_excluded_flags) != 0:
        print("Found a flag declared with an _ but which is not explicitly listed as a valid flag name in hack/verify-flags/excluded-flags.txt")
        print("Are you certain this flag should not have been declared with an - instead?")
        l = list(new_excluded_flags)
        l.sort()
        print("%s" % "\n".join(l))
        sys.exit(1)
def main():
    """Entry point: resolve the repo root and run the underscore check."""
    # The script lives one directory below the repository root.
    root = os.path.abspath(os.path.dirname(__file__) + "/../")
    # Explicit file arguments win; otherwise scan the whole tree.
    targets = args.filenames if args.filenames else get_all_files(root)
    check_underscore_in_flags(root, targets)


if __name__ == "__main__":
    sys.exit(main())
| apache-2.0 |
scollis/iris | lib/iris/tests/unit/plot/test_points.py | 1 | 1395 | # (C) British Crown Copyright 2014, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.plot.points` function."""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
from iris.tests.unit.plot import TestGraphicStringCoord
if tests.MPL_AVAILABLE:
import iris.plot as iplt
@tests.skip_plot
class TestStringCoordPlot(TestGraphicStringCoord):
    """Check that points plots with a string coordinate label the
    matching axis with the coordinate's bounds."""

    def _points_and_check(self, coords, axis):
        # Draw the points plot, then verify the tick labels on `axis`.
        iplt.points(self.cube, coords=coords)
        self.assertBoundsTickLabels(axis)

    def test_yaxis_labels(self):
        self._points_and_check(('bar', 'str_coord'), 'yaxis')

    def test_xaxis_labels(self):
        self._points_and_check(('str_coord', 'bar'), 'xaxis')


if __name__ == "__main__":
    tests.main()
SteveHNH/ansible | lib/ansible/modules/cloud/ovirt/ovirt_tags.py | 75 | 7807 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_tags
short_description: Module to manage tags in oVirt/RHV
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "This module manage tags in oVirt/RHV. It can also manage assignments
of those tags to entities."
options:
name:
description:
- "Name of the tag to manage."
required: true
state:
description:
- "Should the tag be present/absent/attached/detached."
- "C(Note): I(attached) and I(detached) states are supported since version 2.4."
choices: ['present', 'absent', 'attached', 'detached']
default: present
description:
description:
- "Description of the tag to manage."
parent:
description:
- "Name of the parent tag."
vms:
description:
- "List of the VMs names, which should have assigned this tag."
hosts:
description:
- "List of the hosts names, which should have assigned this tag."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Create(if not exists) and assign tag to vms vm1 and vm2:
- ovirt_tags:
name: mytag
vms:
- vm1
- vm2
# Attach a tag to VM 'vm1', keeping the rest already attached tags on VM:
- ovirt_tags:
name: mytag
state: attached
vms:
- vm3
# Detach a tag from VM 'vm1', keeping the rest already attached tags on VM:
- ovirt_tags:
name: mytag
state: detached
vms:
- vm3
# To detach all VMs from tag:
- ovirt_tags:
name: mytag
vms: []
# Remove tag
- ovirt_tags:
state: absent
name: mytag
'''
RETURN = '''
id:
description: ID of the tag which is managed
returned: On success if tag is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
tag:
description: "Dictionary of all the tag attributes. Tag attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/tag."
returned: On success if tag is found.
type: dict
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
create_connection,
equal,
get_id_by_name,
ovirt_full_argument_spec,
)
class TagsModule(BaseModule):
    """BaseModule specialization that manages oVirt/RHV tags and their
    assignments to VMs and hosts."""

    def build_entity(self):
        """Build the otypes.Tag entity from the module parameters.

        The parent tag is referenced by name only; it is resolved by the
        oVirt engine on creation/update.
        """
        return otypes.Tag(
            name=self._module.params['name'],
            description=self._module.params['description'],
            parent=otypes.Tag(
                name=self._module.params['parent'],
            ) if self._module.params['parent'] else None,
        )

    def post_create(self, entity):
        # Run the assignment logic right after creation so that the
        # vms/hosts lists are applied to a freshly created tag as well.
        self.update_check(entity)

    def _update_tag_assignments(self, entity, name):
        """Converge tag assignments for one entity collection.

        :param entity: the tag entity being managed (unused here; the tag
            is identified via the module's ``name`` parameter)
        :param name: collection name, either 'vms' or 'hosts', used both
            as the module parameter key and to locate the SDK service
        Sets ``self.changed`` when any assignment is added or removed
        (also in check mode, where the API calls are skipped).
        """
        if self._module.params[name] is None:
            return

        state = self.param('state')
        # e.g. name='vms' -> connection.system_service().vms_service()
        entities_service = getattr(self._connection.system_service(), '%s_service' % name)()
        # Entities currently carrying this tag (search by tag name).
        current_vms = [
            vm.name
            for vm in entities_service.list(search='tag=%s' % self._module.params['name'])
        ]
        # Assign tags:
        if state in ['present', 'attached', 'detached']:
            for entity_name in self._module.params[name]:
                entity_id = get_id_by_name(entities_service, entity_name)
                tags_service = entities_service.service(entity_id).tags_service()
                current_tags = [tag.name for tag in tags_service.list()]
                # Assign the tag:
                if state in ['attached', 'present']:
                    if self._module.params['name'] not in current_tags:
                        if not self._module.check_mode:
                            tags_service.add(
                                tag=otypes.Tag(
                                    name=self._module.params['name'],
                                ),
                            )
                        self.changed = True
                # Detach the tag:
                elif state == 'detached':
                    if self._module.params['name'] in current_tags:
                        tag_id = get_id_by_name(tags_service, self.param('name'))
                        if not self._module.check_mode:
                            tags_service.tag_service(tag_id).remove()
                        self.changed = True

        # Unassign tags: with state=present the parameter list is
        # authoritative, so entities not listed are detached.
        if state == 'present':
            for entity_name in [e for e in current_vms if e not in self._module.params[name]]:
                if not self._module.check_mode:
                    entity_id = get_id_by_name(entities_service, entity_name)
                    tags_service = entities_service.service(entity_id).tags_service()
                    tag_id = get_id_by_name(tags_service, self.param('name'))
                    tags_service.tag_service(tag_id).remove()
                self.changed = True

    def _get_parent(self, entity):
        """Return the name of the entity's parent tag, or None."""
        parent = None
        if entity.parent:
            parent = self._connection.follow_link(entity.parent).name
        return parent

    def update_check(self, entity):
        """Converge assignments and report whether the tag itself is
        up to date (True means no update of the tag entity is needed)."""
        self._update_tag_assignments(entity, 'vms')
        self._update_tag_assignments(entity, 'hosts')
        return (
            equal(self._module.params.get('description'), entity.description) and
            equal(self._module.params.get('parent'), self._get_parent(entity))
        )
def main():
    """Ansible entry point: create/update/remove the tag per ``state``."""
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent', 'attached', 'detached'],
            default='present',
        ),
        name=dict(default=None, required=True),
        description=dict(default=None),
        parent=dict(default=None),
        vms=dict(default=None, type='list'),
        hosts=dict(default=None, type='list'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    # Fail early if the ovirtsdk4 python package is not importable.
    check_sdk(module)

    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        tags_service = connection.system_service().tags_service()
        tags_module = TagsModule(
            connection=connection,
            module=module,
            service=tags_service,
        )

        state = module.params['state']
        # attached/detached also ensure the tag exists, hence create().
        if state in ['present', 'attached', 'detached']:
            ret = tags_module.create()
        elif state == 'absent':
            ret = tags_module.remove()

        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Only log out if we created the session ourselves (no token given).
        connection.close(logout=auth.get('token') is None)


if __name__ == "__main__":
    main()
| gpl-3.0 |
benjaminjkraft/django | tests/middleware/test_security.py | 291 | 7781 | from django.http import HttpResponse
from django.test import RequestFactory, SimpleTestCase
from django.test.utils import override_settings
class SecurityMiddlewareTest(SimpleTestCase):
    """
    Tests for django.middleware.security.SecurityMiddleware.

    The middleware is driven exclusively by the SECURE_* settings, so every
    @override_settings decorator here must use the SECURE_-prefixed names;
    several decorators previously used unprefixed names (e.g. HSTS_SECONDS)
    that the middleware never reads, which made those tests pass vacuously.
    """

    @property
    def middleware(self):
        # Imported lazily so override_settings is in effect at
        # instantiation time (the middleware reads settings in __init__).
        from django.middleware.security import SecurityMiddleware
        return SecurityMiddleware()

    @property
    def secure_request_kwargs(self):
        # Extra environ kwargs that make RequestFactory build an https request.
        return {"wsgi.url_scheme": "https"}

    def response(self, *args, **kwargs):
        """Build an HttpResponse, applying any extra headers from kwargs."""
        headers = kwargs.pop("headers", {})
        response = HttpResponse(*args, **kwargs)
        for k, v in headers.items():
            response[k] = v
        return response

    def process_response(self, *args, **kwargs):
        """Run a request/response pair through the middleware.

        Returns either the middleware's redirect (if process_request
        short-circuits) or the processed response.
        """
        request_kwargs = {}
        if kwargs.pop("secure", False):
            request_kwargs.update(self.secure_request_kwargs)
        request = (kwargs.pop("request", None) or
                   self.request.get("/some/url", **request_kwargs))
        ret = self.middleware.process_request(request)
        if ret:
            return ret
        return self.middleware.process_response(
            request, self.response(*args, **kwargs))

    request = RequestFactory()

    def process_request(self, method, *args, **kwargs):
        """Run only the request phase of the middleware."""
        if kwargs.pop("secure", False):
            kwargs.update(self.secure_request_kwargs)
        req = getattr(self.request, method.lower())(*args, **kwargs)
        return self.middleware.process_request(req)

    @override_settings(SECURE_HSTS_SECONDS=3600)
    def test_sts_on(self):
        """
        With HSTS_SECONDS=3600, the middleware adds
        "strict-transport-security: max-age=3600" to the response.
        """
        self.assertEqual(
            self.process_response(secure=True)["strict-transport-security"],
            "max-age=3600")

    @override_settings(SECURE_HSTS_SECONDS=3600)
    def test_sts_already_present(self):
        """
        The middleware will not override a "strict-transport-security" header
        already present in the response.
        """
        response = self.process_response(
            secure=True,
            headers={"strict-transport-security": "max-age=7200"})
        self.assertEqual(response["strict-transport-security"], "max-age=7200")

    # Fixed: was HSTS_SECONDS, which the middleware does not read.
    @override_settings(SECURE_HSTS_SECONDS=3600)
    def test_sts_only_if_secure(self):
        """
        The "strict-transport-security" header is not added to responses going
        over an insecure connection.
        """
        self.assertNotIn("strict-transport-security", self.process_response(secure=False))

    # Fixed: was HSTS_SECONDS, which the middleware does not read.
    @override_settings(SECURE_HSTS_SECONDS=0)
    def test_sts_off(self):
        """
        With HSTS_SECONDS of 0, the middleware does not add a
        "strict-transport-security" header to the response.
        """
        self.assertNotIn("strict-transport-security", self.process_response(secure=True))

    @override_settings(
        SECURE_HSTS_SECONDS=600, SECURE_HSTS_INCLUDE_SUBDOMAINS=True)
    def test_sts_include_subdomains(self):
        """
        With HSTS_SECONDS non-zero and HSTS_INCLUDE_SUBDOMAINS
        True, the middleware adds a "strict-transport-security" header with the
        "includeSubDomains" tag to the response.
        """
        response = self.process_response(secure=True)
        self.assertEqual(
            response["strict-transport-security"],
            "max-age=600; includeSubDomains",
        )

    @override_settings(
        SECURE_HSTS_SECONDS=600, SECURE_HSTS_INCLUDE_SUBDOMAINS=False)
    def test_sts_no_include_subdomains(self):
        """
        With HSTS_SECONDS non-zero and HSTS_INCLUDE_SUBDOMAINS
        False, the middleware adds a "strict-transport-security" header without
        the "includeSubDomains" tag to the response.
        """
        response = self.process_response(secure=True)
        self.assertEqual(response["strict-transport-security"], "max-age=600")

    @override_settings(SECURE_CONTENT_TYPE_NOSNIFF=True)
    def test_content_type_on(self):
        """
        With CONTENT_TYPE_NOSNIFF set to True, the middleware adds
        "x-content-type-options: nosniff" header to the response.
        """
        self.assertEqual(self.process_response()["x-content-type-options"], "nosniff")

    # Fixed: was SECURE_CONTENT_TYPE_NO_SNIFF (typo); the real setting
    # name is SECURE_CONTENT_TYPE_NOSNIFF.
    @override_settings(SECURE_CONTENT_TYPE_NOSNIFF=True)
    def test_content_type_already_present(self):
        """
        The middleware will not override an "x-content-type-options" header
        already present in the response.
        """
        response = self.process_response(secure=True, headers={"x-content-type-options": "foo"})
        self.assertEqual(response["x-content-type-options"], "foo")

    @override_settings(SECURE_CONTENT_TYPE_NOSNIFF=False)
    def test_content_type_off(self):
        """
        With CONTENT_TYPE_NOSNIFF False, the middleware does not add an
        "x-content-type-options" header to the response.
        """
        self.assertNotIn("x-content-type-options", self.process_response())

    @override_settings(SECURE_BROWSER_XSS_FILTER=True)
    def test_xss_filter_on(self):
        """
        With BROWSER_XSS_FILTER set to True, the middleware adds
        "s-xss-protection: 1; mode=block" header to the response.
        """
        self.assertEqual(
            self.process_response()["x-xss-protection"],
            "1; mode=block")

    @override_settings(SECURE_BROWSER_XSS_FILTER=True)
    def test_xss_filter_already_present(self):
        """
        The middleware will not override an "x-xss-protection" header
        already present in the response.
        """
        response = self.process_response(secure=True, headers={"x-xss-protection": "foo"})
        self.assertEqual(response["x-xss-protection"], "foo")

    # Fixed: was BROWSER_XSS_FILTER, which the middleware does not read.
    @override_settings(SECURE_BROWSER_XSS_FILTER=False)
    def test_xss_filter_off(self):
        """
        With BROWSER_XSS_FILTER set to False, the middleware does not add an
        "x-xss-protection" header to the response.
        """
        self.assertNotIn("x-xss-protection", self.process_response())

    @override_settings(SECURE_SSL_REDIRECT=True)
    def test_ssl_redirect_on(self):
        """
        With SSL_REDIRECT True, the middleware redirects any non-secure
        requests to the https:// version of the same URL.
        """
        ret = self.process_request("get", "/some/url?query=string")
        self.assertEqual(ret.status_code, 301)
        self.assertEqual(
            ret["Location"], "https://testserver/some/url?query=string")

    @override_settings(SECURE_SSL_REDIRECT=True)
    def test_no_redirect_ssl(self):
        """
        The middleware does not redirect secure requests.
        """
        ret = self.process_request("get", "/some/url", secure=True)
        self.assertEqual(ret, None)

    @override_settings(
        SECURE_SSL_REDIRECT=True, SECURE_REDIRECT_EXEMPT=["^insecure/"])
    def test_redirect_exempt(self):
        """
        The middleware does not redirect requests with URL path matching an
        exempt pattern.
        """
        ret = self.process_request("get", "/insecure/page")
        self.assertEqual(ret, None)

    @override_settings(
        SECURE_SSL_REDIRECT=True, SECURE_SSL_HOST="secure.example.com")
    def test_redirect_ssl_host(self):
        """
        The middleware redirects to SSL_HOST if given.
        """
        ret = self.process_request("get", "/some/url")
        self.assertEqual(ret.status_code, 301)
        self.assertEqual(ret["Location"], "https://secure.example.com/some/url")

    @override_settings(SECURE_SSL_REDIRECT=False)
    def test_ssl_redirect_off(self):
        """
        With SSL_REDIRECT False, the middleware does no redirect.
        """
        ret = self.process_request("get", "/some/url")
        self.assertEqual(ret, None)
| bsd-3-clause |
mjg2203/edx-platform-seas | lms/djangoapps/courseware/management/commands/tests/test_dump_course.py | 16 | 7249 | """Tests for Django management commands"""
import json
import shutil
from StringIO import StringIO
import tarfile
from tempfile import mkdtemp
from path import path
from django.core.management import call_command
from django.test.utils import override_settings
from django.test.testcases import TestCase
from courseware.tests.modulestore_config import TEST_DATA_XML_MODULESTORE
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from courseware.tests.modulestore_config import TEST_DATA_MONGO_MODULESTORE
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.xml_importer import import_from_xml
DATA_DIR = 'common/test/data/'
TEST_COURSE_ID = 'edX/simple/2012_Fall'
class CommandsTestBase(TestCase):
    """
    Base class for testing different django commands.

    Must be subclassed using override_settings set to the modulestore
    to be tested.
    """
    def setUp(self):
        # Import the fixture courses once; subclasses reuse them.
        self.loaded_courses = self.load_courses()

    def load_courses(self):
        """Load test courses and return list of ids"""
        store = modulestore()
        courses = store.get_courses()
        # Only import the XML fixtures if they are not already present.
        if TEST_COURSE_ID not in [c.id for c in courses]:
            import_from_xml(store, DATA_DIR, ['toy', 'simple'])
        return [course.id for course in store.get_courses()]

    def call_command(self, name, *args, **kwargs):
        """Call management command and return output"""
        out = StringIO()  # To Capture the output of the command
        call_command(name, *args, stdout=out, **kwargs)
        out.seek(0)
        return out.read()

    def test_dump_course_ids(self):
        """dump_course_ids should list exactly the loaded course ids."""
        kwargs = {'modulestore': 'default'}
        output = self.call_command('dump_course_ids', **kwargs)
        dumped_courses = output.strip().split('\n')
        self.assertEqual(self.loaded_courses, dumped_courses)

    def test_dump_course_structure(self):
        """Basic structure dump: per-element metadata but no inheritance."""
        args = [TEST_COURSE_ID]
        kwargs = {'modulestore': 'default'}
        output = self.call_command('dump_course_structure', *args, **kwargs)

        dump = json.loads(output)

        # check that all elements in the course structure have metadata,
        # but not inherited metadata:
        for element_name in dump:
            element = dump[element_name]
            self.assertIn('metadata', element)
            self.assertIn('children', element)
            self.assertIn('category', element)
            self.assertNotIn('inherited_metadata', element)

        # Check a few elements in the course dump
        parent_id = 'i4x://edX/simple/chapter/Overview'
        self.assertEqual(dump[parent_id]['category'], 'chapter')
        self.assertEqual(len(dump[parent_id]['children']), 3)

        child_id = dump[parent_id]['children'][1]
        self.assertEqual(dump[child_id]['category'], 'videosequence')
        self.assertEqual(len(dump[child_id]['children']), 2)

        video_id = 'i4x://edX/simple/video/Welcome'
        self.assertEqual(dump[video_id]['category'], 'video')
        self.assertEqual(len(dump[video_id]['metadata']), 4)
        self.assertIn('youtube_id_1_0', dump[video_id]['metadata'])

        # Check if there are the right number of elements
        self.assertEqual(len(dump), 16)

    def test_dump_inherited_course_structure(self):
        """With inherited=True, non-default inherited metadata is included."""
        args = [TEST_COURSE_ID]
        kwargs = {'modulestore': 'default', 'inherited': True}
        output = self.call_command('dump_course_structure', *args, **kwargs)
        dump = json.loads(output)
        # check that all elements in the course structure have inherited metadata,
        # and that it contains a particular value as well:
        for element_name in dump:
            element = dump[element_name]
            self.assertIn('metadata', element)
            self.assertIn('children', element)
            self.assertIn('category', element)
            self.assertIn('inherited_metadata', element)
            self.assertIsNone(element['inherited_metadata']['ispublic'])
            # ... but does not contain inherited metadata containing a default value:
            self.assertNotIn('due', element['inherited_metadata'])

    def test_dump_inherited_course_structure_with_defaults(self):
        """With inherited_defaults=True, default inherited values appear too."""
        args = [TEST_COURSE_ID]
        kwargs = {'modulestore': 'default', 'inherited': True, 'inherited_defaults': True}
        output = self.call_command('dump_course_structure', *args, **kwargs)
        dump = json.loads(output)
        # check that all elements in the course structure have inherited metadata,
        # and that it contains a particular value as well:
        for element_name in dump:
            element = dump[element_name]
            self.assertIn('metadata', element)
            self.assertIn('children', element)
            self.assertIn('category', element)
            self.assertIn('inherited_metadata', element)
            self.assertIsNone(element['inherited_metadata']['ispublic'])
            # ... and contains inherited metadata containing a default value:
            self.assertIsNone(element['inherited_metadata']['due'])

    def test_export_course(self):
        """export_course should produce a readable tar.gz on disk."""
        tmp_dir = path(mkdtemp())
        filename = tmp_dir / 'test.tar.gz'
        try:
            self.run_export_course(filename)

            with tarfile.open(filename) as tar_file:
                self.check_export_file(tar_file)
        finally:
            # Always clean up the temporary directory, even on failure.
            shutil.rmtree(tmp_dir)

    def test_export_course_stdout(self):
        """export_course with '-' should stream the tarball to stdout."""
        output = self.run_export_course('-')
        with tarfile.open(fileobj=StringIO(output)) as tar_file:
            self.check_export_file(tar_file)

    def run_export_course(self, filename):  # pylint: disable=missing-docstring
        args = ['edX/simple/2012_Fall', filename]
        kwargs = {'modulestore': 'default'}
        return self.call_command('export_course', *args, **kwargs)

    def check_export_file(self, tar_file):  # pylint: disable=missing-docstring
        names = tar_file.getnames()

        # Check if some of the files are present.

        # The rest is of the code should be covered by the tests for
        # xmodule.modulestore.xml_exporter, used by the dump_course command

        assert_in = self.assertIn
        assert_in('edX-simple-2012_Fall', names)
        assert_in('edX-simple-2012_Fall/policies/2012_Fall/policy.json', names)
        assert_in('edX-simple-2012_Fall/html/toylab.html', names)
        assert_in('edX-simple-2012_Fall/videosequence/A_simple_sequence.xml', names)
        assert_in('edX-simple-2012_Fall/sequential/Lecture_2.xml', names)
@override_settings(MODULESTORE=TEST_DATA_XML_MODULESTORE)
class CommandsXMLTestCase(CommandsTestBase, ModuleStoreTestCase):
    """
    Test case for management commands using the xml modulestore.
    """


@override_settings(MODULESTORE=TEST_DATA_MONGO_MODULESTORE)
class CommandsMongoTestCase(CommandsTestBase, ModuleStoreTestCase):
    """
    Test case for management commands using the mongo modulestore.
    """


@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class CommandsMixedTestCase(CommandsTestBase, ModuleStoreTestCase):
    """
    Test case for management commands. Using the mixed modulestore.
    """
JackPrice/ansible-modules-extras | cloud/amazon/cloudtrail.py | 8 | 8416 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: cloudtrail
short_description: manage CloudTrail creation and deletion
description:
- Creates or deletes CloudTrail configuration. Ensures logging is also enabled. This module has a dependency on python-boto >= 2.21.
version_added: "2.0"
author: Ted Timmons
options:
state:
description:
- add or remove CloudTrail configuration.
required: true
choices: ['enabled', 'disabled']
name:
description:
- name for given CloudTrail configuration.
- This is a primary key and is used to identify the configuration.
s3_bucket_prefix:
description:
- bucket to place CloudTrail in.
- this bucket should exist and have the proper policy. See U(http://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_bucket_policy.html)
- required when state=enabled.
required: false
s3_key_prefix:
description:
- prefix to keys in bucket. A trailing slash is not necessary and will be removed.
required: false
include_global_events:
description:
- record API calls from global services such as IAM and STS?
required: false
default: false
choices: ["true", "false"]
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
version_added: "1.5"
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
version_added: "1.5"
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
version_added: "1.5"
extends_documentation_fragment: aws
"""
EXAMPLES = """
- name: enable cloudtrail
local_action: cloudtrail
state=enabled name=main s3_bucket_name=ourbucket
s3_key_prefix=cloudtrail region=us-east-1
- name: enable cloudtrail with different configuration
local_action: cloudtrail
state=enabled name=main s3_bucket_name=ourbucket2
s3_key_prefix='' region=us-east-1
- name: remove cloudtrail
local_action: cloudtrail state=absent name=main region=us-east-1
"""
import time
import sys
import os
from collections import Counter
boto_import_failed = False
try:
import boto
import boto.cloudtrail
from boto.regioninfo import RegionInfo
except ImportError:
boto_import_failed = True
class CloudTrailManager:
    """Handles cloudtrail configuration

    Thin wrapper around the boto CloudTrail connection; fails the Ansible
    module if AWS credentials cannot be resolved.
    NOTE: uses Python 2 'except ..., e' syntax, so this module is
    Python 2 only.
    """

    def __init__(self, module, region=None, **aws_connect_params):
        self.module = module
        self.region = region
        self.aws_connect_params = aws_connect_params
        self.changed = False

        try:
            self.conn = connect_to_aws(boto.cloudtrail, self.region, **self.aws_connect_params)
        except boto.exception.NoAuthHandlerFound, e:
            self.module.fail_json(msg=str(e))

    def view_status(self, name):
        """Return the logging status dict for the named trail."""
        return self.conn.get_trail_status(name)

    def view(self, name):
        """Return the trail's description dict, or None if not found."""
        ret = self.conn.describe_trails(trail_name_list=[name])
        trailList = ret.get('trailList', [])
        if len(trailList) == 1:
            return trailList[0]
        return None

    def exists(self, name=None):
        """Return True if a trail with the given name exists."""
        ret = self.view(name)
        if ret:
            return True
        return False

    def enable_logging(self, name):
        '''Turn on logging for a cloudtrail that already exists. Throws Exception on error.'''
        self.conn.start_logging(name)

    def enable(self, **create_args):
        """Create a new trail; kwargs are passed through to boto."""
        return self.conn.create_trail(**create_args)

    def update(self, **create_args):
        """Update an existing trail; kwargs are passed through to boto."""
        return self.conn.update_trail(**create_args)

    def delete(self, name):
        '''Delete a given cloudtrial configuration. Throws Exception on error.'''
        self.conn.delete_trail(name)
def main():
    """Ansible entry point: converge the CloudTrail trail to the
    requested state (enabled/disabled) and ensure logging is on."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state={'required': True, 'choices': ['enabled', 'disabled'] },
        name={'required': True, 'type': 'str' },
        s3_bucket_name={'required': False, 'type': 'str' },
        s3_key_prefix={'default':'', 'required': False, 'type': 'str' },
        include_global_events={'default':True, 'required': False, 'type': 'bool' },
    ))

    # required_together must be a list of lists of parameter names;
    # a flat list would be iterated character by character.
    required_together = [['state', 's3_bucket_name']]

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together)

    # Check the boto import flag only after the module object exists so
    # fail_json is available. (Previously this tested an undefined name
    # 'has_libcloud' before 'module' was created, raising NameError.)
    if boto_import_failed:
        module.fail_json(msg='boto is required.')

    ec2_url, access_key, secret_key, region = get_ec2_creds(module)
    aws_connect_params = dict(aws_access_key_id=access_key,
                              aws_secret_access_key=secret_key)

    if not region:
        module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")

    ct_name = module.params['name']
    s3_bucket_name = module.params['s3_bucket_name']
    # remove trailing slash from the key prefix, really messes up the key structure.
    s3_key_prefix = module.params['s3_key_prefix'].rstrip('/')
    include_global_events = module.params['include_global_events']

    cf_man = CloudTrailManager(module, region=region, **aws_connect_params)

    results = { 'changed': False }
    if module.params['state'] == 'enabled':
        results['exists'] = cf_man.exists(name=ct_name)
        if results['exists']:
            results['view'] = cf_man.view(ct_name)
            # only update if the values have changed.
            if results['view']['S3BucketName'] != s3_bucket_name or \
               results['view']['S3KeyPrefix'] != s3_key_prefix or \
               results['view']['IncludeGlobalServiceEvents'] != include_global_events:
                if not module.check_mode:
                    results['update'] = cf_man.update(name=ct_name, s3_bucket_name=s3_bucket_name, s3_key_prefix=s3_key_prefix, include_global_service_events=include_global_events)
                results['changed'] = True
        else:
            if not module.check_mode:
                # doesn't exist. create it.
                results['enable'] = cf_man.enable(name=ct_name, s3_bucket_name=s3_bucket_name, s3_key_prefix=s3_key_prefix, include_global_service_events=include_global_events)
            results['changed'] = True

        # given cloudtrail should exist now. Enable the logging.
        results['view_status'] = cf_man.view_status(ct_name)
        results['was_logging_enabled'] = results['view_status'].get('IsLogging', False)
        if not results['was_logging_enabled']:
            if not module.check_mode:
                cf_man.enable_logging(ct_name)
                results['logging_enabled'] = True
            results['changed'] = True

    # delete the cloudtrail
    elif module.params['state'] == 'disabled':
        # check to see if it exists before deleting.
        results['exists'] = cf_man.exists(name=ct_name)
        if results['exists']:
            # it exists, so we should delete it and mark changed.
            if not module.check_mode:
                cf_man.delete(ct_name)
            results['changed'] = True

    module.exit_json(**results)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| gpl-3.0 |
thomasorb/orb | orb/utils/io.py | 1 | 33933 | #!/usr/bin/python
# *-* coding: utf-8 *-*
# Author: Thomas Martin <thomas.martin.1@ulaval.ca>
# File: io.py
## Copyright (c) 2010-2020 Thomas Martin <thomas.martin.1@ulaval.ca>
##
## This file is part of ORB
##
## ORB is free software: you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## ORB is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
## License for more details.
##
## You should have received a copy of the GNU General Public License
## along with ORB. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import numpy as np
import time
import warnings
import astropy.io.fits as pyfits
from astropy.io.fits.verify import VerifyWarning, VerifyError, AstropyUserWarning
from astropy.wcs import FITSFixedWarning
import astropy.io.votable
import pandas as pd
import orb.cutils
import h5py
import datetime
import orb.utils.validate
def open_file(file_name, mode='r'):
    """Open a file in read mode (by default) and return a file
    object.

    In write or append mode the file's parent directories are created
    if they do not exist.

    :param file_name: Path to the file, can be either
      relative or absolute.

    :param mode: (Optional) Can be 'w' for write mode, 'r' for
      read mode and 'a' for append mode (default 'r').

    :return: The opened file object.

    :raises Exception: If mode is not one of 'w', 'r', 'a'.
    """
    if mode not in ['w', 'r', 'a']:
        raise Exception("mode option must be 'w', 'r' or 'a'")

    if mode in ['w', 'a']:
        # create folder if it does not exist; exist_ok avoids the race
        # between an exists() check and makedirs() when the directory
        # is created concurrently.
        dirname = os.path.dirname(file_name)
        if dirname != '':
            os.makedirs(dirname, exist_ok=True)

    return open(file_name, mode)
def write_fits(fits_path, fits_data, fits_header=None,
               silent=False, overwrite=True, mask=None,
               replace=False, record_stats=False, mask_path=None):
    """Write data in FITS format. If the file doesn't exist create
    it with its directories.
    If the file already exists add a number to its name before the
    extension (unless 'overwrite' option is set to True).
    :param fits_path: Path to the file, can be either
      relative or absolut.
    :param fits_data: Data to be written in the file.
    :param fits_header: (Optional) Optional keywords to update or
      create. It can be a pyfits.Header() instance or a list of
      tuples [(KEYWORD_1, VALUE_1, COMMENT_1), (KEYWORD_2,
      VALUE_2, COMMENT_2), ...]. Standard keywords like SIMPLE,
      BITPIX, NAXIS, EXTEND does not have to be passed.
    :param silent: (Optional) If True turn this function won't
      display any message (default False)
    :param overwrite: (Optional) If True overwrite the output file
      if it exists (default True).
    :param mask: (Optional) It not None must be an array with the
      same size as the given data but filled with ones and
      zeros. Bad values (NaN or Inf) are converted to 1 and the
      array is converted to 8 bit unsigned integers (uint8). This
      array will be written to the disk with the same path
      terminated by '_mask'. The header of the mask FITS file will
      be the same as the original data (default None).
    :param replace: (Optional) If True and if the file already
      exist, new data replace old data in the existing file. NaN
      values do not replace old values. Other values replace old
      values. New array MUST have the same size as the existing
      array. Note that if replace is True, overwrite is
      automatically set to True.
    :param record_stats: (Optional) If True, record mean and
      median of data. Useful if they often have to be computed
      (default False).
    :param mask_path: (Optional) Path to the corresponding mask image.
    :return: The path the data was actually written to (may differ from
      ``fits_path`` when ``overwrite`` is False and the file exists).
    .. note:: float64 data is converted to float32 data to avoid
      too big files with unnecessary precision
    .. note:: Please refer to
      http://www.stsci.edu/institute/software_hardware/pyfits/ for
      more information on PyFITS module and
      http://fits.gsfc.nasa.gov/ for more information on FITS
      files.
    """
    # layout keywords regenerated by pyfits: a user-supplied header must
    # not override them, they are stripped below before merging
    SECURED_KEYS = ['SIMPLE', 'BITPIX', 'NAXIS', 'NAXIS1',
                    'NAXIS2', 'NAXIS3', 'EXTEND', 'INHERIT',
                    'BZERO', 'BSCALE']
    if not isinstance(fits_data, np.ndarray):
        raise TypeError('Data type must be numpy.ndarray')
    start_time = time.time()
    # change extension if nescessary
    if os.path.splitext(fits_path)[1] != '.fits':
        fits_path = os.path.splitext(fits_path)[0] + '.fits'
    if mask is not None:
        if np.shape(mask) != np.shape(fits_data):
            raise ValueError('Mask must have the same shape as data')
    # 'replace' only makes sense on an existing file, so it implies overwrite
    if replace: overwrite=True
    if overwrite:
        warnings.filterwarnings(
            'ignore', message='Overwriting existing file.*',
            module='astropy.io.*')
    if replace and os.path.exists(fits_path):
        old_data = read_fits(fits_path)
        if old_data.shape == fits_data.shape:
            # NaNs in the new data keep the old values
            fits_data[np.isnan(fits_data)] = old_data[np.isnan(fits_data)]
        else:
            raise Exception("New data shape %s and old data shape %s are not the same. Do not set the option 'replace' to True in this case"%(str(fits_data.shape), str(old_data.shape)))
    # float64/128 data conversion to float32 to avoid too big files
    # with unnecessary precision
    # NOTE(review): np.float128 does not exist on every platform (e.g.
    # Windows); assumed fine on the platforms ORB targets — confirm.
    if fits_data.dtype == np.float64 or fits_data.dtype == np.float128:
        fits_data = fits_data.astype(np.float32)
    # complex data cannot be written in fits
    if np.iscomplexobj(fits_data):
        fits_data = fits_data.real.astype(np.float32)
        logging.warning('Complex data cast to float32 (FITS format do not support complex data)')
    base_fits_path = fits_path
    dirname = os.path.dirname(fits_path)
    if (dirname != []) and (dirname != ''):
        if not os.path.exists(dirname):
            os.makedirs(dirname)
    # when overwrite is False, an index is appended to the file name until
    # a free path is found (see the 'else' branch at the bottom)
    index=0
    file_written = False
    while not file_written:
        if ((not (os.path.exists(fits_path))) or overwrite):
            # data is transposed so that the FITS axis order matches the
            # in-memory (x, y) convention used throughout ORB
            if len(fits_data.shape) > 1:
                hdu = pyfits.PrimaryHDU(fits_data.transpose())
            elif len(fits_data.shape) == 1:
                hdu = pyfits.PrimaryHDU(fits_data[np.newaxis, :])
            else: # 1 number only
                hdu = pyfits.PrimaryHDU(np.array([fits_data]))
            if mask is not None:
                # mask conversion to only zeros or ones
                mask = mask.astype(float)
                mask[np.nonzero(np.isnan(mask))] = 1.
                mask[np.nonzero(np.isinf(mask))] = 1.
                mask[np.nonzero(mask)] = 1.
                mask = mask.astype(np.uint8) # UINT8 is the
                                             # smallest allowed
                                             # type
                hdu_mask = pyfits.PrimaryHDU(mask.transpose())
            # add header optional keywords
            if fits_header is not None:
                ## remove keys of the passed header which corresponds
                ## to the description of the data set
                for ikey in SECURED_KEYS:
                    if ikey in fits_header: fits_header.pop(ikey)
                hdu.header.extend(fits_header, strip=False,
                                  update=True, end=True)
                # Remove 3rd axis related keywords if there is no
                # 3rd axis
                if len(fits_data.shape) <= 2:
                    for ikey in range(len(hdu.header)):
                        if isinstance(hdu.header[ikey], str):
                            if ('Wavelength axis' in hdu.header[ikey]):
                                del hdu.header[ikey]
                                del hdu.header[ikey]
                                break
                    if 'CTYPE3' in hdu.header:
                        del hdu.header['CTYPE3']
                    if 'CRVAL3' in hdu.header:
                        del hdu.header['CRVAL3']
                    if 'CRPIX3' in hdu.header:
                        del hdu.header['CRPIX3']
                    if 'CDELT3' in hdu.header:
                        del hdu.header['CDELT3']
                    if 'CROTA3' in hdu.header:
                        del hdu.header['CROTA3']
                    if 'CUNIT3' in hdu.header:
                        del hdu.header['CUNIT3']
            # add median and mean of the image in the header
            # data is nan filtered before
            if record_stats:
                fdata = fits_data[np.nonzero(~np.isnan(fits_data))]
                if np.size(fdata) > 0:
                    data_mean = np.nanmean(fdata)
                    data_median = np.nanmedian(fdata)
                else:
                    data_mean = np.nan
                    data_median = np.nan
                hdu.header.set('MEAN', str(data_mean),
                               'Mean of data (NaNs filtered)',
                               after=5)
                hdu.header.set('MEDIAN', str(data_median),
                               'Median of data (NaNs filtered)',
                               after=5)
            # add some basic keywords in the header
            date = time.strftime("%Y-%m-%d", time.localtime(time.time()))
            hdu.header.set('MASK', 'False', '', after=5)
            hdu.header.set('DATE', date, 'Creation date', after=5)
            hdu.header.set('PROGRAM', "ORB",
                           'Thomas Martin: thomas.martin.1@ulaval.ca',
                           after=5)
            # write FITS file
            hdu.writeto(fits_path, overwrite=overwrite)
            if mask is not None:
                # the mask shares the data header, only MASK differs
                hdu_mask.header = hdu.header
                hdu_mask.header.set('MASK', 'True', '', after=6)
                if mask_path is None:
                    mask_path = os.path.splitext(fits_path)[0] + '_mask.fits'
                hdu_mask.writeto(mask_path, overwrite=overwrite)
            if not (silent):
                logging.info("Data written as {} in {:.2f} s ".format(
                    fits_path, time.time() - start_time))
            return fits_path
        else :
            # file exists and must be kept: try the next indexed name
            fits_path = (os.path.splitext(base_fits_path)[0] +
                         "_" + str(index) +
                         os.path.splitext(base_fits_path)[1])
            index += 1
def read_fits(fits_path, no_error=False, nan_filter=False,
              return_header=False, return_hdu_only=False,
              return_mask=False, silent=False, delete_after=False,
              data_index=None, image_mode='classic', chip_index=None,
              binning=None, fix_header=True, dtype=float,
              mask_path=None):
    """Read a FITS data file and returns its data.
    :param fits_path: Path to the file, can be either
      relative or absolut.
    :param no_error: (Optional) If True this function will only
      display a warning message if the file does not exist (so it
      does not raise an exception) (default False)
    :param nan_filter: (Optional) If True replace NaN by zeros
      (default False)
    :param return_header: (Optional) If True return a tuple (data,
       header) (default False).
    :param return_hdu_only: (Optional) If True return FITS header
      data unit only. No data will be returned (default False).
    :param return_mask: (Optional) If True return only the mask
      corresponding to the data file (default False).
    :param silent: (Optional) If True no message is displayed
      except if an error is raised (default False).
    :param delete_after: (Optional) If True delete file after
      reading (default False).
    :param data_index: (Optional) Index of data in the header data
      unit (Default None).
    :param image_mode: (Optional) Can be 'sitelle', 'spiomm' or
      'classic'. In 'sitelle' mode, the parameter
      chip_index must also be set to 0 or 1. In this mode only
      one of both SITELLE quadrants is returned. In 'classic' mode
      the whole frame is returned (default 'classic').
    :param chip_index: (Optional) Index of the chip of the
      SITELLE image. Used only if image_mode is set to 'sitelle'
      In this case, must be 1 or 2. Else must be None (default
      None).
    :param binning: (Optional) If not None, returned data is
      binned by this amount (must be an integer >= 1)
    :param fix_header: (Optional) If True, fits header is
      fixed to avoid errors due to header inconsistencies
      (e.g. WCS errors) (default True).
    :param dtype: (Optional) Data is converted to
      the given dtype (e.g. np.float32, default float).
    :param mask_path: (Optional) Path to the corresponding mask image.
    .. note:: Please refer to
      http://www.stsci.edu/institute/software_hardware/pyfits/ for
      more information on PyFITS module. And
      http://fits.gsfc.nasa.gov/ for more information on FITS
      files.
    """
    # keep only the first line of the path: avoids bugs when the path has
    # been read from a text file with a trailing newline
    fits_path = ((fits_path.splitlines())[0]).strip()
    if return_mask:
        if mask_path is None:
            mask_path = os.path.splitext(fits_path)[0] + '_mask.fits'
        fits_path = mask_path
    try:
        warnings.filterwarnings('ignore', module='astropy')
        warnings.filterwarnings('ignore', category=ResourceWarning)
        hdulist = pyfits.open(fits_path)
        if data_index is None:
            data_index = get_hdu_data_index(hdulist)
        fits_header = hdulist[data_index].header
    except Exception as e:
        # FIX: the original message mixed '%s' with str.format, leaving a
        # literal '%s' in the output
        if not no_error:
            raise IOError(
                "File '{}' could not be opened: {}".format(fits_path, e))
        else:
            if not silent:
                logging.warning(
                    "File '{}' could not be opened: {}".format(fits_path, e))
            return None
    # Correct header: drop 3rd-axis WCS keywords left over on 2D data
    if fix_header:
        if fits_header['NAXIS'] == 2:
            if 'CTYPE3' in fits_header: del fits_header['CTYPE3']
            if 'CRVAL3' in fits_header: del fits_header['CRVAL3']
            if 'CUNIT3' in fits_header: del fits_header['CUNIT3']
            if 'CRPIX3' in fits_header: del fits_header['CRPIX3']
            if 'CROTA3' in fits_header: del fits_header['CROTA3']
    if return_hdu_only:
        # note: the HDUList is intentionally left open so the caller can
        # access the returned HDU's data lazily
        return hdulist[data_index]
    else:
        if image_mode == 'classic':
            # transpose to the in-memory (x, y) convention used by ORB
            fits_data = np.array(
                hdulist[data_index].data.transpose()).astype(dtype)
        elif image_mode == 'sitelle':
            fits_data = read_sitelle_chip(hdulist[data_index], chip_index)
        elif image_mode == 'spiomm':
            fits_data, fits_header = read_spiomm_data(
                hdulist, fits_path)
        else:
            raise ValueError("Image_mode must be set to 'sitelle', 'spiomm' or 'classic'")
        # FIX: was 'hdulist.close' (attribute access, never called), which
        # leaked the open file handle
        hdulist.close()
    if binning is not None:
        # FIX: was 'utils.image.bin_image' but 'utils' was never imported
        # (NameError); use the fully qualified module name
        fits_data = orb.utils.image.bin_image(fits_data, binning)
    if (nan_filter):
        fits_data = np.nan_to_num(fits_data)
    if delete_after:
        try:
            os.remove(fits_path)
        except:
            logging.warning("The file '%s' could not be deleted"%fits_path)
    if return_header:
        return np.squeeze(fits_data), fits_header
    else:
        return np.squeeze(fits_data)
def get_hdu_data_index(hdul):
    """Return the index of the first header data unit (HDU) that
    actually contains data.
    :param hdul: A pyfits.HDU instance
    :raises Exception: If no HDU of ``hdul`` contains any data.
    """
    idx = 0
    while True:
        # an empty HDU list raises IndexError here, as before
        if hdul[idx].data is not None:
            return idx
        idx += 1
        if idx >= len(hdul):
            raise Exception('No data recorded in FITS file')
def read_sitelle_chip(hdu, chip_index, substract_bias=True):
"""Return chip data of a SITELLE FITS image.
:param hdu: pyfits.HDU Instance of the SITELLE image
:param chip_index: Index of the chip to read. Must be 1 or 2.
:param substract_bias: If True bias is automatically
substracted by using the overscan area (default True).
"""
def get_slice(key, index):
key = '{}{}'.format(key, index)
if key not in hdu.header: raise Exception(
'Bad SITELLE image header')
chip_section = hdu.header[key]
return get_sitelle_slice(chip_section)
def get_data(key, index, frame):
xslice, yslice = get_slice(key, index)
return np.copy(frame[yslice, xslice]).transpose()
if int(chip_index) not in (1,2): raise Exception(
'Chip index must be 1 or 2')
frame = hdu.data.astype(np.float)
# get data without bias substraction
if not substract_bias:
return get_data('DSEC', chip_index, frame)
if chip_index == 1:
amps = ['A', 'B', 'C', 'D']
elif chip_index == 2:
amps = ['E', 'F', 'G', 'H']
xchip, ychip = get_slice('DSEC', chip_index)
data = np.empty((xchip.stop - xchip.start, ychip.stop - ychip.start),
dtype=float)
# removing bias
for iamp in amps:
xamp, yamp = get_slice('DSEC', iamp)
amp_data = get_data('DSEC', iamp, frame)
bias_data = get_data('BSEC', iamp, frame)
overscan_size = int(bias_data.shape[0]/2)
if iamp in ['A', 'C', 'E', 'G']:
bias_data = bias_data[-overscan_size:,:]
else:
bias_data = bias_data[:overscan_size,:]
bias_data = np.mean(bias_data, axis=0)
amp_data = amp_data - bias_data
data[xamp.start - xchip.start: xamp.stop - xchip.start,
yamp.start - ychip.start: yamp.stop - ychip.start] = amp_data
return data
def get_sitelle_slice(slice_str):
    """Parse a SITELLE section string (e.g. ``'[1:2048,1:4096]'``) into a
    pair of Python slices (x slice, y slice), converting the 1-based
    inclusive FITS convention to 0-based half-open indexing.
    :param slice_str: Slice string.
    """
    # drop surrounding quotes when present (keeps original behavior of
    # stripping exactly the first and last character)
    if "'" in slice_str:
        slice_str = slice_str[1:-1]
    xpart, ypart = slice_str[1:-1].split(',')
    x_lo, x_hi = (int(bound) for bound in xpart.split(':'))
    y_lo, y_hi = (int(bound) for bound in ypart.split(':'))
    return slice(x_lo - 1, x_hi, 1), slice(y_lo - 1, y_hi, 1)
def read_spiomm_data(hdu, image_path, substract_bias=True):
    """Return data of an SpIOMM FITS image.
    :param hdu: pyfits.HDU Instance of the SpIOMM image (note: this is
      actually an HDU *list*; it is indexed below)
    :param image_path: Image path
    :param substract_bias: If True bias is automatically
      substracted by using the associated bias frame as an
      overscan frame. Mean bias level is thus computed along the y
      axis of the bias frame (default True).
    :return: A tuple (frame, header).
    """
    # fraction of the frame size used to define the central region over
    # which the recorded bias level is estimated
    CENTER_SIZE_COEFF = 0.1
    data_index = get_hdu_data_index(hdu)
    # FIX: np.float was deprecated in NumPy 1.20 and removed later; use
    # the builtin float (same dtype: float64)
    frame = np.array(hdu[data_index].data.transpose()).astype(float)
    hdr = hdu[data_index].header
    # check presence of a bias
    bias_path = os.path.splitext(image_path)[0] + '_bias.fits'
    if os.path.exists(bias_path):
        bias_frame = read_fits(bias_path)
        if substract_bias:
            ## create overscan line
            overscan = orb.cutils.meansigcut2d(bias_frame, axis=1)
            frame = (frame.T - overscan.T).T
        x_min = int(bias_frame.shape[0]/2.
                    - CENTER_SIZE_COEFF * bias_frame.shape[0])
        x_max = int(bias_frame.shape[0]/2.
                    + CENTER_SIZE_COEFF * bias_frame.shape[0] + 1)
        y_min = int(bias_frame.shape[1]/2.
                    - CENTER_SIZE_COEFF * bias_frame.shape[1])
        y_max = int(bias_frame.shape[1]/2.
                    + CENTER_SIZE_COEFF * bias_frame.shape[1] + 1)
        bias_level = np.nanmedian(bias_frame[x_min:x_max, y_min:y_max])
        # FIX: the original test 'bias_level is not np.nan' is an identity
        # comparison which is True even when nanmedian returns NaN (NaN
        # values are not the np.nan singleton); use isnan instead so a NaN
        # level is never recorded in the header
        if not np.isnan(bias_level):
            hdr['BIAS-LVL'] = (
                bias_level,
                'Bias level (moment, at the center of the frame)')
    return frame, hdr
def open_hdf5(file_path, mode):
    """Return a :py:class:`h5py.File` instance, stamping provenance
    attributes ('program', 'date') whenever the file is writable.
    :param file_path: Path to the hdf5 file.
    :param mode: Opening mode. Can be 'r', 'r+', 'w', 'w-', 'x', 'a'.
    .. note:: Please refer to http://www.h5py.org/.
    """
    creating = mode in ('w', 'a', 'w-', 'x')
    if creating:
        # make sure the destination folder exists before h5py opens it
        parent = os.path.dirname(file_path)
        if parent != '' and not os.path.exists(parent):
            os.makedirs(parent)
    handle = h5py.File(file_path, mode)
    if creating or mode == 'r+':
        handle.attrs['program'] = 'Created/modified with ORB'
        handle.attrs['date'] = str(datetime.datetime.now())
    return handle
def write_hdf5(file_path, data, header=None,
               silent=False, overwrite=True, max_hdu_check=True,
               compress=False):
    """
    Write data in HDF5 format.
    A header can be added to the data. This method is useful to
    handle an HDF5 data file like a FITS file. It implements most
    of the functionality of the method
    :py:meth:`core.Tools.write_fits`.
    .. note:: The output HDF5 file can contain mutiple data header
      units (HDU). Each HDU is in a specific group named 'hdu*', *
      being the index of the HDU. The first HDU is named
      HDU0. Each HDU contains one data group (HDU*/data) which
      contains a numpy.ndarray and one header group
      (HDU*/header). Each subgroup of a header group is a keyword
      and its associated value, comment and type.
    :param file_path: Path to the HDF5 file to create
    :param data: A numpy array (numpy.ndarray instance) of numeric
      values. If a list of arrays is given, each array will be
      placed in a specific HDU. The header keyword must also be
      set to a list of headers of the same length.
    :param header: (Optional) Optional keywords to update or
      create. It can be a pyfits.Header() instance or a list of
      tuples [(KEYWORD_1, VALUE_1, COMMENT_1), (KEYWORD_2,
      VALUE_2, COMMENT_2), ...]. Standard keywords like SIMPLE,
      BITPIX, NAXIS, EXTEND does not have to be passed (default
      None). It can also be a list of headers if a list of arrays
      has been passed to the option 'data'.
    :param max_hdu_check: (Optional): When True, if the input data
      is a list (interpreted as a list of data unit), check if
      it's length is not too long to make sure that the input list
      is not a single data array that has not been converted to a
      numpy.ndarray format. If the number of HDU to create is
      indeed very long this can be set to False (default True).
    :param silent: (Optional) If True turn this function won't
      display any message (default False)
    :param overwrite: (Optional) If True overwrite the output file
      if it exists (default True).
    :param compress: (Optional) If True data is compressed using
      the SZIP library (see
      https://www.hdfgroup.org/doc_resource/SZIP/). SZIP library
      must be installed (default False).
    :return: The path the data was actually written to.
    .. note:: Please refer to http://www.h5py.org/.
    """
    # guard against a plain sequence being mistaken for a list of HDUs
    MAX_HDUS = 3
    start_time = time.time()
    # change extension if nescessary
    if os.path.splitext(file_path)[1] != '.hdf5':
        file_path = os.path.splitext(file_path)[0] + '.hdf5'
    # Check if data is a list of arrays.
    if not isinstance(data, list):
        data = [data]
    if max_hdu_check and len(data) > MAX_HDUS:
        raise Exception('Data list length is > {}. As a list is interpreted has a list of data unit make sure to pass a numpy.ndarray instance instead of a list. '.format(MAX_HDUS))
    # Check header format: accept a single Header, a single keyword list,
    # or one header per data unit
    if header is not None:
        if isinstance(header, pyfits.Header):
            header = [header]
        elif isinstance(header, list):
            if (isinstance(header[0], list)
                or isinstance(header[0], tuple)):
                header_seems_ok = False
                if (isinstance(header[0][0], list)
                    or isinstance(header[0][0], tuple)):
                    # we have a list of headers
                    if len(header) == len(data):
                        header_seems_ok = True
                elif isinstance(header[0][0], str):
                    # we only have one header
                    if len(header[0]) > 2:
                        header = [header]
                        header_seems_ok = True
                if not header_seems_ok:
                    raise Exception('Badly formated header')
            elif not isinstance(header[0], pyfits.Header):
                raise Exception('Header must be a pyfits.Header instance or a list')
        else:
            raise Exception('Header must be a pyfits.Header instance or a list')
        if len(header) != len(data):
            raise Exception('The number of headers must be the same as the number of data units.')
    # change path if file exists and must not be overwritten
    new_file_path = str(file_path)
    if not overwrite and os.path.exists(new_file_path):
        index = 0
        while os.path.exists(new_file_path):
            new_file_path = (os.path.splitext(file_path)[0] +
                             "_" + str(index) +
                             os.path.splitext(file_path)[1])
            index += 1
    # open file
    with open_hdf5(new_file_path, 'w') as f:
        ## add data + header
        for i in range(len(data)):
            idata = data[i]
            # Check if data has a valid format.
            if not isinstance(idata, np.ndarray):
                try:
                    idata = np.array(idata, dtype=float)
                except Exception as e:
                    raise Exception('Data to write must be convertible to a numpy array of numeric values: {}'.format(e))
            # convert data to float32
            if idata.dtype == np.float64:
                idata = idata.astype(np.float32)
            # hdu name
            hdu_group_name = 'hdu{}'.format(i)
            if compress:
                # NOTE(review): despite the docstring, compression is done
                # with LZF (the SZIP/GZIP variants are commented out)
                f.create_dataset(
                    hdu_group_name + '/data', data=idata,
                    compression='lzf', compression_opts=None)
                    #compression='szip', compression_opts=('nn', 32))
                    #compression='gzip', compression_opts=9)
            else:
                f.create_dataset(
                    hdu_group_name + '/data', data=idata)
            # add header
            if header is not None:
                iheader = header[i]
                if not isinstance(iheader, pyfits.Header):
                    iheader = pyfits.Header(iheader)
                f[hdu_group_name + '/header'] = header_fits2hdf5(
                    iheader)
    logging.info('Data written as {} in {:.2f} s'.format(
        new_file_path, time.time() - start_time))
    return new_file_path
# Types that can be serialized to and recovered from an hdf5/FITS header.
# FIX: np.float128 does not exist on every platform (e.g. Windows, some
# ARM builds); np.longdouble always exists and aliases float128 where it
# is supported. The duplicate 'int' entry was also removed.
castables = [int, float, bool, str,
             np.int64, np.float64, getattr(np, 'float128', np.longdouble),
             np.bool_]
def cast(a, t_str):
    """Cast the (string) value *a* to the type whose repr is *t_str*.
    :param a: value to cast (typically a string read back from hdf5)
    :param t_str: repr() of a type in ``castables`` (bytes accepted);
      Python 2 style reprs ("<type 'long'>") are normalized first.
    :raises Exception: If *t_str* matches no castable type.
    """
    if isinstance(t_str, bytes):
        t_str = t_str.decode()
    # normalize Python 2 reprs: "<type 'long'>" -> "<class 'int'>"
    if 'type' in t_str: t_str = t_str.replace('type', 'class')
    if 'long' in t_str: t_str = t_str.replace('long', 'int')
    for _t in castables:
        if t_str == repr(_t):
            # FIX: bool('False') is True, so booleans stored as strings
            # must be parsed explicitly to round-trip correctly
            if _t in (bool, np.bool_) and isinstance(a, str):
                return _t(a == 'True')
            return _t(a)
    raise Exception('Bad type string {} should be in {}'.format(t_str, [repr(_t) for _t in castables]))
def dict2array(data):
    """Convert a dictionary to an array that can be written in an hdf5 file
    Only entries whose value type is in ``castables`` are kept; each kept
    entry becomes a (key, value, type-repr) string triplet.
    :param data: Must be a dict instance
    """
    if not isinstance(data, dict): raise TypeError('data must be a dict')
    arr = list()
    for key in data:
        if type(data[key]) in castables:
            _tstr = str(type(data[key]))
            arr.append(np.array(
                (key, data[key], _tstr)))
        else:
            logging.debug('{} of type {} not passed to array'.format(key, type(data[key])))
    return np.array(arr)
def array2dict(data):
    """Convert an array read from an hdf5 file to a dict.
    Inverse of :func:`dict2array`: values are cast back to their
    original types.
    :param data: array of params returned by dict2array
    """
    _dict = dict()
    for i in range(len(data)):
        _dict[data[i][0]] = cast(data[i][1], data[i][2])
    return _dict
def dict2header(params):
    """convert a dict to a pyfits.Header() instance
    .. warning:: this is a destructive process, illegal values are
      removed from the header.
    :param params: a dict instance
    :return: a pyfits.Header instance containing only the entries of
      *params* whose value type is castable and FITS-legal.
    """
    # filter illegal header values
    cards = list()
    for iparam in params:
        val = params[iparam]
        # keep only values whose type is in the module-level castables list
        val_ok = False
        for itype in castables:
            if isinstance(val, itype):
                val_ok = True
        if val_ok:
            # FITS headers store booleans as integers
            if isinstance(val, bool):
                val = int(val)
            card = pyfits.Card(
                keyword=iparam,
                value=val,
                comment=None)
            # silently drop cards that do not pass FITS validation
            try:
                card.verify(option='exception')
                cards.append(card)
            except (VerifyError, ValueError, TypeError):
                pass
    # astropy would otherwise warn about fixed/non-standard cards
    warnings.simplefilter('ignore', category=VerifyWarning)
    warnings.simplefilter('ignore', category=AstropyUserWarning)
    warnings.simplefilter('ignore', category=FITSFixedWarning)
    header = pyfits.Header(cards)
    return header
def header_fits2hdf5(fits_header):
    """convert a pyfits.Header() instance to a header for an hdf5 file
    Each card becomes a (keyword, value, comment, type-repr) row of a
    fixed-width byte-string array.
    :param fits_header: Header of the FITS file
    .. note:: values are serialized with str() and stored as 'S300'
      byte strings; entries longer than 300 bytes would be truncated.
    """
    hdf5_header = list()
    for ikey in range(len(fits_header)):
        # remember the original value type so it can be cast back on read
        _tstr = str(type(fits_header[ikey]))
        ival = np.array(
            (list(fits_header.keys())[ikey], str(fits_header[ikey]),
             fits_header.comments[ikey], _tstr))
        hdf5_header.append(ival)
    return np.array(hdf5_header, dtype='S300')
def header_hdf52fits(hdf5_header):
    """convert an hdf5 header to a pyfits.Header() instance.
    Inverse of :func:`header_fits2hdf5`: each (keyword, value, comment,
    type-repr) row becomes a FITS card with the value cast back to its
    original type.
    :param hdf5_header: Header of the HDF5 file
    """
    fits_header = pyfits.Header()
    for i in range(hdf5_header.shape[0]):
        ival = hdf5_header[i,:]
        # stored as byte strings (dtype 'S300'); decode before use
        ival = [iival.decode() for iival in ival]
        if ival[3] != 'comment':
            fits_header[ival[0]] = cast(ival[1], ival[3]), str(ival[2])
        else:
            fits_header['comment'] = ival[1]
    return fits_header
def read_hdf5(file_path, return_header=False, dtype=float):
    """Read an HDF5 data file created with
    :py:meth:`core.Tools.write_hdf5`.
    :param file_path: Path to the file, can be either
      relative or absolute.
    :param return_header: (Optional) If True return a tuple (data,
      header) (default False).
    :param dtype: (Optional) Data is converted to the given type
      (e.g. np.float32, default float).
    :return: A single array (or (array, header) tuple) when the file
      contains one HDU, else lists of arrays (and headers).
    .. note:: Please refer to http://www.h5py.org/."""
    with open_hdf5(file_path, 'r') as f:
        data = list()
        header = list()
        # one group per HDU ('hdu0', 'hdu1', ...)
        for hdu_name in f:
            data.append(f[hdu_name + '/data'][:].astype(dtype))
            if return_header:
                if hdu_name + '/header' in f:
                    # extract header
                    header.append(
                        header_hdf52fits(f[hdu_name + '/header'][:]))
                else: header.append(None)
    # single-HDU files are unwrapped for convenience
    if len(data) == 1:
        if return_header:
            return data[0], header[0]
        else:
            return data[0]
    else:
        if return_header:
            return data, header
        else:
            return data
def cast2hdf5(val):
    """Return *val* converted to a form that h5py can store.
    None becomes the string 'None'; float128 scalars and arrays are
    downcast to float64; everything else passes through unchanged.
    """
    if val is None:
        return 'None'
    if isinstance(val, np.float128):
        return val.astype(np.float64)
    #elif isinstance(val, int):
    #    return str(val)
    if isinstance(val, np.ndarray) and val.dtype == np.float128:
        return val.astype(np.float64)
    return val
def get_storing_dtype(arr):
    """Return the dtype used to store *arr* on disk: float64 is narrowed
    to float32 and complex128 to complex64; other dtypes are unchanged.
    :raises TypeError: If *arr* is not a numpy.ndarray.
    """
    if not isinstance(arr, np.ndarray):
        raise TypeError('arr must be a numpy.ndarray instance')
    narrowing = {np.dtype(np.float64): np.float32,
                 np.dtype(np.complex128): np.complex64}
    return narrowing.get(arr.dtype, arr.dtype)
def cast_storing_dtype(arr):
    """Return *arr* converted to its on-disk storage dtype
    (float64 -> float32, complex128 -> complex64, others unchanged).
    :raises TypeError: If *arr* is not a numpy.ndarray.
    """
    if not isinstance(arr, np.ndarray):
        raise TypeError('arr must be a numpy.ndarray instance')
    # inlined storage-dtype rule (same mapping as get_storing_dtype)
    if arr.dtype == np.float64:
        return arr.astype(np.float32)
    if arr.dtype == np.complex128:
        return arr.astype(np.complex64)
    return arr.astype(arr.dtype)
def save_dflist(dflist, path):
    """Save a list of dataframes
    Any existing file at *path* is removed first. The list length is
    stored as an attribute and each non-None dataframe is appended as a
    'df<index>' table; None entries are simply skipped (restored as None
    by :func:`load_dflist`).
    :param dflist: list of pandas dataframes
    :param path: path to the output file
    .. note:: pandas ``to_hdf`` requires the PyTables package.
    """
    if os.path.exists(path):
        os.remove(path)
    # record the list length so load_dflist can restore None entries
    with open_hdf5(path, 'w') as f:
        f.attrs['len'] = len(dflist)
    for idf in range(len(dflist)):
        if dflist[idf] is not None:
            dflist[idf].to_hdf(path, 'df{:06d}'.format(idf),
                               format='table', mode='a')
def load_dflist(path):
    """Load a list of dataframes written by :func:`save_dflist`.
    Entries missing from the file (saved as None) are restored as None.
    :param path: path to the input file
    :return: list of pandas dataframes (possibly containing None).
    """
    with open_hdf5(path, 'r') as f:
        _len = f.attrs['len']
    dflist = list()
    for i in range(_len):
        try:
            idf = pd.read_hdf(path, key='df{:06d}'.format(i))
            dflist.append(idf)
        except KeyError:
            # this index was a None entry when saved
            dflist.append(None)
    return dflist
def read_votable(votable_file):
    """read a votable and transfer it as as pandas dataframe.
    Only the first table of the votable is read.
    :param votable_file: path to the votable file
    :return: a pandas.DataFrame instance.
    taken from https://gist.github.com/icshih/52ca49eb218a2d5b660ee4a653301b2b
    """
    votable = astropy.io.votable.parse(votable_file)
    # use_names_over_ids keeps the human-readable column names
    table = votable.get_first_table().to_table(use_names_over_ids=True)
    return table.to_pandas()
def save_starlist(path, starlist):
    """Save a star list as a two columnfile X, Y readable by ds9
    :param path: path to the output text file
    :param starlist: array of shape (n, 2) of star positions; column 0
      is written first then column 1 (presumably X then Y pixel
      coordinates — matches the ds9 region convention).
    """
    orb.utils.validate.is_2darray(starlist, object_name='starlist')
    if starlist.shape[1] != 2:
        raise TypeError('starlist must be of shape (n,2)')
    # one 'X Y' pair per line, space separated
    with open_file(path, 'w') as f:
        for i in range(starlist.shape[0]):
            f.write('{} {}\n'.format(starlist[i,0], starlist[i,1]))
        f.flush()
| gpl-3.0 |
TeslaProject/external_chromium_org | tools/perf/page_sets/polymer.py | 29 | 8930 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class PolymerPage(page_module.Page):
  """Base class for Telemetry pages exercising Polymer demos.
  Injects a script that records when the 'polymer-ready' event fires so
  navigation can block until the Polymer elements are upgraded.
  """
  def __init__(self, url, page_set):
    super(PolymerPage, self).__init__(
        url=url,
        page_set=page_set)
    # set a flag once Polymer has finished upgrading all elements
    self.script_to_evaluate_on_commit = '''
        document.addEventListener("polymer-ready", function() {
          window.__polymer_ready = true;
        });
      '''
  def RunNavigateSteps(self, action_runner):
    # wait for the flag set by the commit script above
    action_runner.NavigateToPage(self)
    action_runner.WaitForJavaScriptCondition(
        'window.__polymer_ready')
class PolymerCalculatorPage(PolymerPage):
  """Measures smoothness on the paper-calculator demo: a button tap and a
  swipe that opens the advanced panel drawer.
  """
  def __init__(self, page_set):
    super(PolymerCalculatorPage, self).__init__(
      url=('http://www.polymer-project.org/components/paper-calculator/'
          'demo.html'),
      page_set=page_set)
  def RunSmoothness(self, action_runner):
    self.TapButton(action_runner)
    self.SlidePanel(action_runner)
  def TapButton(self, action_runner):
    # tap the '5' key (reached through the shadow DOM) and let the ripple
    # animation play out
    interaction = action_runner.BeginInteraction(
        'Action_TapAction', is_smooth=True)
    action_runner.TapElement(element_function='''
        document.querySelector(
          'body /deep/ #outerPanels'
        ).querySelector(
          '#standard'
        ).shadowRoot.querySelector(
          'paper-calculator-key[label="5"]'
        )''')
    action_runner.Wait(2)
    interaction.End()
  def SlidePanel(self, action_runner):
    # only bother with this interaction if the drawer is hidden
    opened = action_runner.EvaluateJavaScript('''
        (function() {
          var outer = document.querySelector("body /deep/ #outerPanels");
          return outer.opened || outer.wideMode;
          }());''')
    if not opened:
      interaction = action_runner.BeginInteraction(
          'Action_SwipeAction', is_smooth=True)
      # drag the advanced panel's handle bar to slide it open
      action_runner.SwipeElement(
          left_start_ratio=0.1, top_start_ratio=0.2,
          direction='left', distance=300, speed_in_pixels_per_second=5000,
          element_function='''
              document.querySelector(
                'body /deep/ #outerPanels'
              ).querySelector(
                '#advanced'
              ).shadowRoot.querySelector(
                '.handle-bar'
              )''')
      action_runner.WaitForJavaScriptCondition('''
          var outer = document.querySelector("body /deep/ #outerPanels");
          outer.opened || outer.wideMode;''')
      interaction.End()
class PolymerShadowPage(PolymerPage):
  """Measures smoothness of the paper-shadow demo by animating the shadow
  depth ('z') of elements on the page.
  """
  def __init__(self, page_set):
    super(PolymerShadowPage, self).__init__(
      url='http://www.polymer-project.org/components/paper-shadow/demo.html',
      page_set=page_set)
  def RunSmoothness(self, action_runner):
    action_runner.ExecuteJavaScript(
        "document.getElementById('fab').scrollIntoView()")
    action_runner.Wait(5)
    self.AnimateShadow(action_runner, 'card')
    #FIXME(wiltzius) disabling until this issue is fixed:
    # https://github.com/Polymer/paper-shadow/issues/12
    #self.AnimateShadow(action_runner, 'fab')
  def AnimateShadow(self, action_runner, eid):
    # step the shadow depth from 1 to 5, one second per level
    for i in range(1, 6):
      action_runner.ExecuteJavaScript(
          'document.getElementById("{0}").z = {1}'.format(eid, i))
      action_runner.Wait(1)
class PolymerSampler(PolymerPage):
  """Exercises a single Paper Sampler subpage: optionally scrolls the
  content pane, then taps/swipes every enabled interactive widget on it.
  """
  def __init__(self, page_set, anchor, scrolling_page=False):
    """Page exercising interactions with a single Paper Sampler subpage.
    Args:
      page_set: Page set to inforporate this page into.
      anchor: string indicating which subpage to load (matches the element
          type that page is displaying)
      scrolling_page: Whether scrolling the content pane is relevant to this
          content page or not.
    """
    super(PolymerSampler, self).__init__(
      url=('http://www.polymer-project.org/components/%s/demo.html' % anchor),
      page_set=page_set)
    self.scrolling_page = scrolling_page
    # JS expression for the document containing the demo content; kept as
    # an attribute so RunNavigateSteps/ScrollContentPane can share it
    self.iframe_js = 'document'
  def RunNavigateSteps(self, action_runner):
    super(PolymerSampler, self).RunNavigateSteps(action_runner)
    # wait for Polymer readiness in both the outer page and the demo frame
    waitForLoadJS = """
      window.Polymer.whenPolymerReady(function() {
        %s.contentWindow.Polymer.whenPolymerReady(function() {
          window.__polymer_ready = true;
        })
      });
      """ % self.iframe_js
    action_runner.ExecuteJavaScript(waitForLoadJS)
    action_runner.WaitForJavaScriptCondition(
        'window.__polymer_ready')
  def RunSmoothness(self, action_runner):
    #TODO(wiltzius) Add interactions for input elements and shadow pages
    if self.scrolling_page:
      # Only bother scrolling the page if its been marked as worthwhile
      self.ScrollContentPane(action_runner)
    self.TouchEverything(action_runner)
  def ScrollContentPane(self, action_runner):
    # scroll the core-scroll-header-panel container down then back up
    element_function = (self.iframe_js + '.querySelector('
        '"core-scroll-header-panel").$.mainContainer')
    interaction = action_runner.BeginInteraction('Scroll_Page', is_smooth=True)
    action_runner.ScrollElement(use_touch=True,
                                direction='down',
                                distance='900',
                                element_function=element_function)
    interaction.End()
    interaction = action_runner.BeginInteraction('Scroll_Page', is_smooth=True)
    action_runner.ScrollElement(use_touch=True,
                                direction='up',
                                distance='900',
                                element_function=element_function)
    interaction.End()
  def TouchEverything(self, action_runner):
    # widget element types that respond to a tap
    tappable_types = [
        'paper-button',
        'paper-checkbox',
        'paper-fab',
        'paper-icon-button',
        # crbug.com/394756
        # 'paper-radio-button',
        'paper-tab',
        'paper-toggle-button',
        'x-shadow',
    ]
    for tappable_type in tappable_types:
      self.DoActionOnWidgetType(action_runner, tappable_type, self.TapWidget)
    # widget element types that respond to a drag/swipe
    swipeable_types = ['paper-slider']
    for swipeable_type in swipeable_types:
      self.DoActionOnWidgetType(action_runner, swipeable_type, self.SwipeWidget)
  def DoActionOnWidgetType(self, action_runner, widget_type, action_function):
    # Find all widgets of this type, but skip any that are disabled or are
    # currently active as they typically don't produce animation frames.
    element_list_query = (self.iframe_js +
        ('.querySelectorAll("body %s:not([disabled]):'
         'not([active])")' % widget_type))
    roles_count_query = element_list_query + '.length'
    for i in range(action_runner.EvaluateJavaScript(roles_count_query)):
      element_query = element_list_query + ("[%d]" % i)
      if action_runner.EvaluateJavaScript(
          element_query + '.offsetParent != null'):
        # Only try to tap on visible elements (offsetParent != null)
        action_runner.ExecuteJavaScript(element_query + '.scrollIntoView()')
        action_runner.Wait(1) # wait for page to settle after scrolling
        action_function(action_runner, element_query)
  def TapWidget(self, action_runner, element_function):
    interaction = action_runner.BeginInteraction(
        'Tap_Widget', is_smooth=True)
    action_runner.TapElement(element_function=element_function)
    action_runner.Wait(1) # wait for e.g. animations on the widget
    interaction.End()
  def SwipeWidget(self, action_runner, element_function):
    # drag from 3/4 across the widget (e.g. a slider thumb area)
    interaction = action_runner.BeginInteraction(
        'Swipe_Widget', is_smooth=True)
    action_runner.SwipeElement(element_function=element_function,
                               left_start_ratio=0.75,
                               speed_in_pixels_per_second=300)
    interaction.End()
class PolymerPageSet(page_set_module.PageSet):
  """Page set covering the Polymer demo pages plus interesting samplers."""

  def __init__(self):
    super(PolymerPageSet, self).__init__(
        user_agent_type='mobile',
        archive_data_file='data/polymer.json',
        bucket=page_set_module.PUBLIC_BUCKET)
    self.AddPage(PolymerCalculatorPage(self))
    self.AddPage(PolymerShadowPage(self))
    # Polymer Sampler subpages that are interesting to tap / swipe elements on
    tappable_subpages = [
        'paper-button',
        'paper-checkbox',
        'paper-fab',
        'paper-icon-button',
        # crbug.com/394756
        # 'paper-radio-button',
        #FIXME(wiltzius) Disabling x-shadow until this issue is fixed:
        # https://github.com/Polymer/paper-shadow/issues/12
        #'paper-shadow',
        'paper-tabs',
        'paper-toggle-button',
    ]
    for subpage in tappable_subpages:
      self.AddPage(PolymerSampler(self, subpage))
    # Polymer Sampler subpages that are interesting to scroll
    for subpage in ['core-scroll-header-panel']:
      self.AddPage(PolymerSampler(self, subpage, scrolling_page=True))
| bsd-3-clause |
GalenMa/kolla | kolla/tests/test_hacking.py | 8 | 1714 | # Copyright 2016 GohighSec
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
from kolla.hacking import checks
from kolla.tests import base
@ddt.ddt
class HackingTestCase(base.TestCase):
    """Hacking test cases

    This class tests the hacking checks in kolla.hacking.checks by passing
    strings to the check methods like the pep8/flake8 parser would. The parser
    loops over each line in the file and then passes the parameters to the
    check method.
    """

    @staticmethod
    def _num_violations(check, line):
        # Each hacking check yields one tuple per violation found in the line.
        return len(list(check(line)))

    def test_no_log_warn_check(self):
        self.assertEqual(0, self._num_violations(
            checks.no_log_warn,
            "LOG.warning('This should not trigger LOG.warn"
            "hacking check.')"))
        self.assertEqual(1, self._num_violations(
            checks.no_log_warn,
            "LOG.warn('We should not use LOG.warn')"))

    def test_no_mutable_default_args(self):
        self.assertEqual(1, self._num_violations(
            checks.no_mutable_default_args,
            "def get_info_from_bdm(virt_type, bdm, mapping=[])"))
        self.assertEqual(0, self._num_violations(
            checks.no_mutable_default_args,
            "defined = []"))
        self.assertEqual(0, self._num_violations(
            checks.no_mutable_default_args,
            "defined, undefined = [], {}"))
| apache-2.0 |
kvar/ansible | lib/ansible/modules/network/f5/bigip_wait.py | 38 | 11246 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_wait
short_description: Wait for a BIG-IP condition before continuing
description:
- You can wait for BIG-IP to be "ready". By "ready", we mean that BIG-IP is ready
to accept configuration.
- This module can take into account situations where the device is in the middle
of rebooting due to a configuration change.
version_added: 2.5
options:
timeout:
description:
- Maximum number of seconds to wait for.
- When used without other conditions it is equivalent of just sleeping.
- The default timeout is deliberately set to 2 hours because no individual
REST API.
type: int
default: 7200
delay:
description:
- Number of seconds to wait before starting to poll.
type: int
default: 0
sleep:
description:
- Number of seconds to sleep between checks, before 2.3 this was hardcoded to 1 second.
type: int
default: 1
msg:
description:
- This overrides the normal error message from a failure to meet the required conditions.
type: str
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Wait for BIG-IP to be ready to take configuration
bigip_wait:
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Wait a maximum of 300 seconds for BIG-IP to be ready to take configuration
bigip_wait:
timeout: 300
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Wait for BIG-IP to be ready, don't start checking for 10 seconds
bigip_wait:
delay: 10
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
# only common fields returned
'''
import datetime
import signal
import time
from ansible.module_utils.basic import AnsibleModule
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import f5_argument_spec
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import f5_argument_spec
def hard_timeout(module, want, start):
    """SIGALRM handler: fail the module once the hard timeout has expired."""
    elapsed = datetime.datetime.utcnow() - start
    msg = want.msg or "Timeout when waiting for BIG-IP"
    module.fail_json(msg=msg, elapsed=elapsed.seconds)
class Parameters(AnsibleF5Parameters):
    """Typed view over the bigip_wait module parameters."""
    # Keys that are copied into the module's result dict.
    returnables = [
        'elapsed'
    ]
    def to_return(self):
        """Collect returnable values (best-effort: errors yield the partial dict)."""
        result = {}
        try:
            for key in self.returnables:
                result[key] = getattr(self, key)
            result = self._filter_params(result)
        except Exception:
            # Deliberate best-effort behavior: return whatever was gathered.
            pass
        return result
    @property
    def delay(self):
        raw = self._values['delay']
        if raw is None:
            return None
        return int(raw)
    @property
    def timeout(self):
        raw = self._values['timeout']
        if raw is None:
            return None
        return int(raw)
    @property
    def sleep(self):
        raw = self._values['sleep']
        if raw is None:
            return None
        return int(raw)
class Changes(Parameters):
    # Intentionally identical to Parameters; kept as a separate name for
    # consistency with other F5 modules that distinguish desired ("want")
    # values from changed values.
    pass
class ModuleManager(object):
    """Drives the wait: polls the BIG-IP REST API until the device is usable."""
    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = F5RestClient(**self.module.params)
        self.have = None
        self.want = Parameters(params=self.module.params)
        self.changes = Parameters()
    def exec_module(self):
        """Run the wait and return the Ansible result dict."""
        result = dict()
        changed = self.execute()
        changes = self.changes.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result
    def _announce_deprecations(self, result):
        # Forward any accumulated deprecation warnings to Ansible and strip
        # them from the returned result.
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )
    def _get_client_connection(self):
        # Build a fresh client for every poll attempt; a cached client could
        # hold a dead connection while the device reboots.
        return F5RestClient(**self.module.params)
    def execute(self):
        """Block until the device answers REST requests and provisioning settles.

        Returns False (waiting never changes device state). Fails the module,
        via SIGALRM or loop exhaustion, if the timeout expires first.
        """
        signal.signal(
            signal.SIGALRM,
            lambda sig, frame: hard_timeout(self.module, self.want, start)
        )
        # setup handler before scheduling signal, to eliminate a race
        signal.alarm(int(self.want.timeout))
        start = datetime.datetime.utcnow()
        if self.want.delay:
            time.sleep(float(self.want.delay))
        end = start + datetime.timedelta(seconds=int(self.want.timeout))
        while datetime.datetime.utcnow() < end:
            time.sleep(int(self.want.sleep))
            try:
                # The first test verifies that the REST API is available; this is done
                # by repeatedly trying to login to it.
                self.client = self._get_client_connection()
                if not self.client:
                    continue
                if self._device_is_rebooting():
                    # Wait for the reboot to happen and then start from the beginning
                    # of the waiting.
                    continue
                if self._is_mprov_running_on_device():
                    self._wait_for_module_provisioning()
                break
            except Exception as ex:
                if 'Failed to validate the SSL' in str(ex):
                    raise F5ModuleError(str(ex))
                # The types of exception's we're handling here are "REST API is not
                # ready" exceptions.
                #
                # For example,
                #
                # Typically caused by device starting up:
                #
                # icontrol.exceptions.iControlUnexpectedHTTPError: 404 Unexpected Error:
                # Not Found for uri: https://localhost:10443/mgmt/tm/sys/
                # icontrol.exceptions.iControlUnexpectedHTTPError: 503 Unexpected Error:
                # Service Temporarily Unavailable for uri: https://localhost:10443/mgmt/tm/sys/
                #
                #
                # Typically caused by a device being down
                #
                # requests.exceptions.SSLError: HTTPSConnectionPool(host='localhost', port=10443):
                # Max retries exceeded with url: /mgmt/tm/sys/ (Caused by SSLError(
                # SSLError("bad handshake: SysCallError(-1, 'Unexpected EOF')",),))
                #
                #
                # Typically caused by device still booting
                #
                # raise SSLError(e, request=request)\nrequests.exceptions.SSLError:
                # HTTPSConnectionPool(host='localhost', port=10443): Max retries
                # exceeded with url: /mgmt/shared/authn/login (Caused by
                # SSLError(SSLError(\"bad handshake: SysCallError(-1, 'Unexpected EOF')\",),)),
                continue
        else:
            # while-else: the loop ran to exhaustion without hitting ``break``,
            # i.e. the device never became ready within the timeout.
            elapsed = datetime.datetime.utcnow() - start
            self.module.fail_json(
                msg=self.want.msg or "Timeout when waiting for BIG-IP", elapsed=elapsed.seconds
            )
        elapsed = datetime.datetime.utcnow() - start
        self.changes.update({'elapsed': elapsed.seconds})
        return False
    def _device_is_rebooting(self):
        """Return True if the device's runlevel indicates a reboot (runlevel 6)."""
        params = {
            "command": "run",
            "utilCmdArgs": '-c "runlevel"'
        }
        uri = "https://{0}:{1}/mgmt/tm/util/bash".format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 403]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        if 'commandResult' in response and '6' in response['commandResult']:
            return True
        return False
    def _wait_for_module_provisioning(self):
        """Wait until mprov has been idle for several consecutive checks."""
        # To prevent things from running forever, the hack is to check
        # for mprov's status twice. If mprov is finished, then in most
        # cases (not ASM) the provisioning is probably ready.
        nops = 0
        # Sleep a little to let provisioning settle and begin properly
        time.sleep(5)
        while nops < 4:
            try:
                if not self._is_mprov_running_on_device():
                    nops += 1
                else:
                    nops = 0
            except Exception as ex:
                # This can be caused by restjavad restarting.
                pass
            time.sleep(10)
    def _is_mprov_running_on_device(self):
        """Return True if the mprov provisioning process shows up in ps output."""
        params = {
            "command": "run",
            "utilCmdArgs": '-c "ps aux | grep \'[m]prov\'"'
        }
        uri = "https://{0}:{1}/mgmt/tm/util/bash".format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 403]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        # The grep pattern only matches when mprov is running, so any
        # commandResult at all means a match was found.
        if 'commandResult' in response:
            return True
        return False
class ArgumentSpec(object):
    """Argument specification for the bigip_wait module."""
    def __init__(self):
        self.supports_check_mode = True
        module_args = dict(
            timeout=dict(default=7200, type='int'),
            delay=dict(default=0, type='int'),
            sleep=dict(default=1, type='int'),
            msg=dict()
        )
        # Start from the shared F5 connection arguments, then layer on the
        # module-specific ones.
        self.argument_spec = {}
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(module_args)
def main():
    """Module entry point: build the AnsibleModule and run the wait manager."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode
    )
    try:
        manager = ModuleManager(module=module)
        module.exit_json(**manager.exec_module())
    except F5ModuleError as ex:
        module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| gpl-3.0 |
resmo/ansible | lib/ansible/plugins/terminal/frr.py | 47 | 2237 | #
# (c) 2018 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible.errors import AnsibleConnectionFailure
from ansible.plugins.terminal import TerminalBase
class TerminalModule(TerminalBase):
    """Terminal handling for FRR's vtysh shell."""
    # A prompt ends in '>' (exec) or '#' (enable/config), optionally preceded
    # by up to three parenthesised context markers, e.g. 'frr(config-if)# '.
    terminal_stdout_re = [
        re.compile(br"[\r\n]?[\w\+\-\.:\/\[\]]+(?:\([^\)]+\)){0,3}(?:[>#]) ?$")
    ]
    # Error strings emitted by vtysh and the individual FRR daemons.
    terminal_stderr_re = [
        re.compile(br"% Command incomplete", re.I),
        re.compile(br"% Unknown command", re.I),
        re.compile(br"(?:\S+) instance is already running", re.I),
        re.compile(br"% (?:Create|Specify) .* first", re.I),
        re.compile(br"(?:\S+) is not running", re.I),
        re.compile(br"% Can't find .*", re.I),
        re.compile(br"invalid input", re.I),
        re.compile(br"connection timed out", re.I),
        re.compile(br"[^\r\n]+ not found"),
    ]
    def on_open_shell(self):
        # Disable paging so long command output is not delivered interactively.
        try:
            self._exec_cli_command(b'terminal length 0')
        except AnsibleConnectionFailure:
            raise AnsibleConnectionFailure('unable to set terminal parameters')
    def on_become(self, passwd=None):
        # NOTE: For FRR, enable password only takes effect when telnetting to individual daemons
        # vtysh will always drop into enable mode since it runs as a privileged process
        pass
    def on_unbecome(self):
        # NOTE: For FRR, enable password only takes effect when telnetting to individual daemons
        # vtysh will always drop into enable mode since it runs as a privileged process
        pass
| gpl-3.0 |
gfyoung/elasticsearch | dev-tools/get-bwc-version.py | 100 | 3106 | # Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
'''
Downloads and extracts elasticsearch for backwards compatibility tests.
'''
import argparse
import os
import platform
import shutil
import subprocess
import urllib.request
import zipfile
def parse_config():
    """Parse the command-line options for the backwards-compat download script."""
    parser = argparse.ArgumentParser(description=__doc__)
    options = [
        (('--path',), dict(metavar='DIR', default='./backwards',
                           help='Where to extract elasticsearch')),
        (('--force',), dict(action='store_true', default=False,
                            help='Delete and redownload if the version already exists')),
        (('version',), dict(metavar='X.Y.Z',
                            help='Version of elasticsearch to grab')),
    ]
    for names, kwargs in options:
        parser.add_argument(*names, **kwargs)
    return parser.parse_args()
def main():
    """Download, extract and clean up the requested elasticsearch release."""
    c = parse_config()
    if not os.path.exists(c.path):
        print('Creating %s' % c.path)
        os.mkdir(c.path)
    is_windows = platform.system() == 'Windows'
    os.chdir(c.path)
    version_dir = 'elasticsearch-%s' % c.version
    if os.path.exists(version_dir):
        if c.force:
            print('Removing old download %s' % version_dir)
            shutil.rmtree(version_dir)
        else:
            print('Version %s exists at %s' % (c.version, version_dir))
            return
    # before 1.4.0, the zip file contains windows scripts, and tar.gz contained *nix scripts
    if is_windows:
        filename = '%s.zip' % version_dir
    else:
        filename = '%s.tar.gz' % version_dir
    # Release artifacts moved hosts over time; choose the URL by version.
    if c.version == '1.2.0':
        # 1.2.0 was pulled from download.elasticsearch.org because of routing bug:
        url = 'http://central.maven.org/maven2/org/elasticsearch/elasticsearch/1.2.0/%s' % filename
    elif c.version.startswith('0.') or c.version.startswith('1.') or c.version.startswith('2.'):
        url = 'https://download.elasticsearch.org/elasticsearch/elasticsearch/%s' % filename
    else:
        url = 'https://artifacts.elastic.co/downloads/elasticsearch/%s' % filename
    print('Downloading %s' % url)
    urllib.request.urlretrieve(url, filename)
    print('Extracting to %s' % version_dir)
    if is_windows:
        archive = zipfile.ZipFile(filename)
        archive.extractall()
    else:
        # for some reason python's tarfile module has trouble with ES tgz?
        subprocess.check_call('tar -xzf %s' % filename, shell=True)
    print('Cleaning up %s' % filename)
    os.remove(filename)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('Ctrl-C caught, exiting')
| apache-2.0 |
FlorianLudwig/odoo | addons/account_bank_statement_extensions/report/__init__.py | 415 | 1128 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import bank_statement_balance_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
wyom/sympy | sympy/physics/quantum/innerproduct.py | 116 | 4261 | """Symbolic inner product."""
from __future__ import print_function, division
from sympy import Expr, conjugate
from sympy.printing.pretty.stringpict import prettyForm
from sympy.physics.quantum.dagger import Dagger
from sympy.physics.quantum.state import KetBase, BraBase
__all__ = [
'InnerProduct'
]
# InnerProduct is not an QExpr because it is really just a regular commutative
# number. We have gone back and forth about this, but we gain a lot by having
# it subclass Expr. The main challenges were getting Dagger to work
# (we use _eval_conjugate) and represent (we can use atoms and subs). Having
# it be an Expr, mean that there are no commutative QExpr subclasses,
# which simplifies the design of everything.
class InnerProduct(Expr):
    """An unevaluated inner product between a Bra and a Ket [1].
    Parameters
    ==========
    bra : BraBase or subclass
        The bra on the left side of the inner product.
    ket : KetBase or subclass
        The ket on the right side of the inner product.
    Examples
    ========
    Create an InnerProduct and check its properties:
        >>> from sympy.physics.quantum import Bra, Ket, InnerProduct
        >>> b = Bra('b')
        >>> k = Ket('k')
        >>> ip = b*k
        >>> ip
        <b|k>
        >>> ip.bra
        <b|
        >>> ip.ket
        |k>
    In simple products of kets and bras inner products will be automatically
    identified and created::
        >>> b*k
        <b|k>
    But in more complex expressions, there is ambiguity in whether inner or
    outer products should be created::
        >>> k*b*k*b
        |k><b|*|k>*<b|
    A user can force the creation of a inner products in a complex expression
    by using parentheses to group the bra and ket::
        >>> k*(b*k)*b
        <b|k>*|k>*<b|
    Notice how the inner product <b|k> moved to the left of the expression
    because inner products are commutative complex numbers.
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Inner_product
    """
    is_complex = True
    def __new__(cls, bra, ket):
        if not isinstance(ket, KetBase):
            raise TypeError('KetBase subclass expected, got: %r' % ket)
        if not isinstance(bra, BraBase):
            # Bug fix: previously this interpolated ``ket``, which made the
            # error message misleading when the bra argument was invalid.
            raise TypeError('BraBase subclass expected, got: %r' % bra)
        obj = Expr.__new__(cls, bra, ket)
        return obj
    @property
    def bra(self):
        """The bra (left) factor of the inner product."""
        return self.args[0]
    @property
    def ket(self):
        """The ket (right) factor of the inner product."""
        return self.args[1]
    def _eval_conjugate(self):
        # Conjugate of <b|k> is <k|b>, built from the daggered states.
        return InnerProduct(Dagger(self.ket), Dagger(self.bra))
    def _sympyrepr(self, printer, *args):
        return '%s(%s,%s)' % (self.__class__.__name__,
            printer._print(self.bra, *args), printer._print(self.ket, *args))
    def _sympystr(self, printer, *args):
        sbra = str(self.bra)
        sket = str(self.ket)
        # Merge '<b|' and '|k>' into '<b|k>' by dropping the shared bar.
        return '%s|%s' % (sbra[:-1], sket[1:])
    def _pretty(self, printer, *args):
        # Print state contents
        bra = self.bra._print_contents_pretty(printer, *args)
        ket = self.ket._print_contents_pretty(printer, *args)
        # Print brackets
        height = max(bra.height(), ket.height())
        use_unicode = printer._use_unicode
        lbracket, _ = self.bra._pretty_brackets(height, use_unicode)
        cbracket, rbracket = self.ket._pretty_brackets(height, use_unicode)
        # Build innerproduct
        pform = prettyForm(*bra.left(lbracket))
        pform = prettyForm(*pform.right(cbracket))
        pform = prettyForm(*pform.right(ket))
        pform = prettyForm(*pform.right(rbracket))
        return pform
    def _latex(self, printer, *args):
        bra_label = self.bra._print_contents_latex(printer, *args)
        ket = printer._print(self.ket, *args)
        return r'\left\langle %s \right. %s' % (bra_label, ket)
    def doit(self, **hints):
        """Try to evaluate the inner product; return self if no rule applies."""
        try:
            r = self.ket._eval_innerproduct(self.bra, **hints)
        except NotImplementedError:
            try:
                # Fall back to the conjugate of the dual states' product.
                r = conjugate(
                    self.bra.dual._eval_innerproduct(self.ket.dual, **hints)
                )
            except NotImplementedError:
                r = None
        if r is not None:
            return r
        return self
| bsd-3-clause |
bhcopeland/ansible-modules-extras | cloud/vmware/vmware_dvs_host.py | 71 | 8335 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: vmware_dvs_host
short_description: Add or remove a host from distributed virtual switch
description:
- Add or remove a host from distributed virtual switch
version_added: 2.0
author: "Joseph Callen (@jcpowermac)"
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
esxi_hostname:
description:
- The ESXi hostname
required: True
switch_name:
description:
- The name of the Distributed vSwitch
required: True
vmnics:
description:
- The ESXi hosts vmnics to use with the Distributed vSwitch
required: True
state:
description:
- If the host should be present or absent attached to the vSwitch
choices: ['present', 'absent']
required: True
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
# Example vmware_dvs_host command from Ansible Playbooks
- name: Add Host to dVS
local_action:
module: vmware_dvs_host
hostname: vcenter_ip_or_hostname
username: vcenter_username
password: vcenter_password
esxi_hostname: esxi_hostname_as_listed_in_vcenter
switch_name: dvSwitch
vmnics:
- vmnic0
- vmnic1
state: present
'''
try:
import collections
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
class VMwareDvsHost(object):
    """Manage membership of an ESXi host in a distributed virtual switch."""
    def __init__(self, module):
        self.module = module
        # Fixed: ``self.dv_switch`` was previously assigned None twice.
        self.dv_switch = None
        self.uplink_portgroup = None
        self.host = None
        self.nic = None
        self.content = connect_to_api(self.module)
        self.state = self.module.params['state']
        self.switch_name = self.module.params['switch_name']
        self.esxi_hostname = self.module.params['esxi_hostname']
        self.vmnics = self.module.params['vmnics']
    def process_state(self):
        """Dispatch to the handler for the (desired, current) state pair."""
        try:
            dvs_host_states = {
                'absent': {
                    'present': self.state_destroy_dvs_host,
                    'absent': self.state_exit_unchanged,
                },
                'present': {
                    'update': self.state_update_dvs_host,
                    'present': self.state_exit_unchanged,
                    'absent': self.state_create_dvs_host,
                }
            }
            dvs_host_states[self.state][self.check_dvs_host_state()]()
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)
        except Exception as e:
            self.module.fail_json(msg=str(e))
    def find_dvspg_by_name(self):
        """Find a portgroup on the dvSwitch by name, or None.

        NOTE(review): ``self.portgroup_name`` is never assigned anywhere in
        this class, so this helper appears to be dead code -- confirm before
        relying on it.
        """
        portgroups = self.dv_switch.portgroup
        for pg in portgroups:
            if pg.name == self.portgroup_name:
                return pg
        return None
    def find_dvs_uplink_pg(self):
        """Return the dvSwitch's uplink portgroup, or None if it has none."""
        # There should only always be a single uplink port group on
        # a distributed virtual switch
        if len(self.dv_switch.config.uplinkPortgroup):
            return self.dv_switch.config.uplinkPortgroup[0]
        else:
            return None
    def modify_dvs_host(self, operation):
        """Reconfigure the dvSwitch host membership.

        operation is one of "add", "edit" or "remove".
        """
        spec = vim.DistributedVirtualSwitch.ConfigSpec()
        spec.configVersion = self.dv_switch.config.configVersion
        spec.host = [vim.dvs.HostMember.ConfigSpec()]
        spec.host[0].operation = operation
        spec.host[0].host = self.host
        if operation in ("edit", "add"):
            # Back every requested vmnic with the switch's uplink portgroup.
            spec.host[0].backing = vim.dvs.HostMember.PnicBacking()
            for nic in self.vmnics:
                pnic_spec = vim.dvs.HostMember.PnicSpec()
                pnic_spec.pnicDevice = nic
                pnic_spec.uplinkPortgroupKey = self.uplink_portgroup.key
                spec.host[0].backing.pnicSpec.append(pnic_spec)
        task = self.dv_switch.ReconfigureDvs_Task(spec)
        changed, result = wait_for_task(task)
        return changed, result
    def _modify_and_exit(self, operation):
        # Shared implementation for the add/edit/remove state handlers.
        # In check mode we report changed=True without touching the switch.
        changed = True
        result = None
        if not self.module.check_mode:
            changed, result = self.modify_dvs_host(operation)
        self.module.exit_json(changed=changed, result=str(result))
    def state_destroy_dvs_host(self):
        """Remove the host from the dvSwitch."""
        self._modify_and_exit("remove")
    def state_exit_unchanged(self):
        """Nothing to do; report no change."""
        self.module.exit_json(changed=False)
    def state_update_dvs_host(self):
        """Update the host's vmnic backing on the dvSwitch."""
        self._modify_and_exit("edit")
    def state_create_dvs_host(self):
        """Attach the host to the dvSwitch."""
        self._modify_and_exit("add")
    def find_host_attached_dvs(self):
        """Return the HostSystem if it is already a member of the dvSwitch."""
        for dvs_host_member in self.dv_switch.config.host:
            if dvs_host_member.config.host.name == self.esxi_hostname:
                return dvs_host_member.config.host
        return None
    def check_uplinks(self):
        """Return True if the host's current pnics match the requested vmnics."""
        pnic_device = []
        for dvs_host_member in self.dv_switch.config.host:
            if dvs_host_member.config.host == self.host:
                for pnicSpec in dvs_host_member.config.backing.pnicSpec:
                    pnic_device.append(pnicSpec.pnicDevice)
        # Order-insensitive comparison of the two vmnic lists.
        return collections.Counter(pnic_device) == collections.Counter(self.vmnics)
    def check_dvs_host_state(self):
        """Determine the current state: 'absent', 'present' or 'update'."""
        self.dv_switch = find_dvs_by_name(self.content, self.switch_name)
        if self.dv_switch is None:
            raise Exception("A distributed virtual switch %s does not exist" % self.switch_name)
        self.uplink_portgroup = self.find_dvs_uplink_pg()
        if self.uplink_portgroup is None:
            raise Exception("An uplink portgroup does not exist on the distributed virtual switch %s"
                            % self.switch_name)
        self.host = self.find_host_attached_dvs()
        if self.host is None:
            # We still need the HostSystem object to add the host
            # to the distributed vswitch
            self.host = find_hostsystem_by_name(self.content, self.esxi_hostname)
            if self.host is None:
                self.module.fail_json(msg="The esxi_hostname %s does not exist in vCenter" % self.esxi_hostname)
            return 'absent'
        else:
            if self.check_uplinks():
                return 'present'
            else:
                return 'update'
def main():
    """Module entry point: validate arguments and run the dvSwitch manager."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        esxi_hostname=dict(required=True, type='str'),
        switch_name=dict(required=True, type='str'),
        vmnics=dict(required=True, type='list'),
        state=dict(default='present', choices=['present', 'absent'], type='str'),
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')
    VMwareDvsHost(module).process_state()
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
quarckster/cfme_tests | cfme/test_framework/appliance_police.py | 1 | 2826 | import attr
import pytest
import requests
from cfme.utils import ports
from cfme.utils.net import net_check
from cfme.utils.wait import TimedOutError
from cfme.utils.conf import rdb
from fixtures.pytest_store import store
from cfme.fixtures.rdb import Rdb
@attr.s
class AppliancePoliceException(Exception):
    """Raised when an appliance health probe fails; carries the failing port."""
    message = attr.ib()
    port = attr.ib()

    def __str__(self):
        return "{0} (port {1})".format(self.message, self.port)
@pytest.fixture(autouse=True, scope="function")
def appliance_police():
    """Before each test, verify the appliance is healthy; attempt recovery
    (EVM restart) or summon a human via RDB when it is not."""
    # Only meaningful when running under a slave manager (parallel runner).
    if not store.slave_manager:
        return
    try:
        port_numbers = {
            'ssh': ports.SSH,
            'https': store.current_appliance.ui_port,
            'postgres': ports.DB}
        port_results = {pn: net_check(pp, force=True) for pn, pp in port_numbers.items()}
        for port, result in port_results.items():
            if port == 'ssh' and store.current_appliance.is_pod:
                # ssh is not available for podified appliance
                continue
            if not result:
                raise AppliancePoliceException('Unable to connect', port_numbers[port])
        try:
            status_code = requests.get(store.current_appliance.url, verify=False,
                                       timeout=120).status_code
        except Exception:
            raise AppliancePoliceException('Getting status code failed', port_numbers['https'])
        if status_code != 200:
            raise AppliancePoliceException('Status code was {}, should be 200'.format(
                status_code), port_numbers['https'])
        # All probes passed -- appliance is healthy.
        return
    except AppliancePoliceException as e:
        # special handling for known failure conditions
        if e.port == 443:
            # Lots of rdbs lately where evm seems to have entirely crashed
            # and (sadly) the only fix is a rude restart
            store.current_appliance.restart_evm_service(rude=True)
            try:
                store.current_appliance.wait_for_web_ui(900)
                store.write_line('EVM was frozen and had to be restarted.', purple=True)
                return
            except TimedOutError:
                pass
        e_message = str(e)
    except Exception as e:
        e_message = str(e)
    # Regardless of the exception raised, we didn't return anywhere above:
    # time to call a human
    msg = 'Help! My appliance {} crashed with: {}'.format(store.current_appliance.url, e_message)
    store.slave_manager.message(msg)
    if 'appliance_police_recipients' in rdb:
        rdb_kwargs = {
            'subject': 'RDB Breakpoint: Appliance failure',
            'recipients': rdb.appliance_police_recipients,
        }
    else:
        rdb_kwargs = {}
    # Drop into a remote debugger so a human can inspect the appliance.
    Rdb(msg).set_trace(**rdb_kwargs)
    store.slave_manager.message('Resuming testing following remote debugging')
| gpl-2.0 |
alheinecke/tensorflow-xsmm | tensorflow/contrib/tensor_forest/python/ops/data_ops.py | 36 | 6976 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for preprocessing data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.python.ops import tensor_forest_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import tf_logging as logging
# Data column types for indicating categorical or other non-float values.
DATA_FLOAT = 0
DATA_CATEGORICAL = 1
DTYPE_TO_FTYPE = {
dtypes.string: DATA_CATEGORICAL,
dtypes.int32: DATA_CATEGORICAL,
dtypes.int64: DATA_CATEGORICAL,
dtypes.float32: DATA_FLOAT,
dtypes.float64: DATA_FLOAT
}
def CastToFloat(tensor):
  """Return `tensor` in a float-compatible representation.

  String tensors are reinterpreted bit-wise via the tensor_forest op,
  integer tensors are cast numerically, and anything else passes through
  unchanged.
  """
  if tensor.dtype == dtypes.string:
    return tensor_forest_ops.reinterpret_string_to_float(tensor)
  if tensor.dtype.is_integer:
    return math_ops.to_float(tensor)
  return tensor
# TODO(gilberth): If protos are ever allowed in dynamically loaded custom
# op libraries, convert this to a proto like a sane person.
class TensorForestDataSpec(object):
  """Poor-man's proto describing the dense and sparse input columns."""

  def __init__(self):
    # Sparse and dense column descriptions, plus total dense feature width.
    self.sparse = DataColumnCollection()
    self.dense = DataColumnCollection()
    self.dense_features_size = 0

  def SerializeToString(self):
    """Render the spec as the string format the C++ ops expect."""
    template = 'dense_features_size: %d dense: [%s] sparse: [%s]'
    return template % (self.dense_features_size,
                       self.dense.SerializeToString(),
                       self.sparse.SerializeToString())
class DataColumnCollection(object):
  """Collection of DataColumns, meant to mimic a proto repeated field."""

  def __init__(self):
    self.cols = []

  def add(self):  # pylint: disable=invalid-name
    """Append a fresh DataColumn and return it, proto-style."""
    col = DataColumn()
    self.cols.append(col)
    return col

  def size(self):  # pylint: disable=invalid-name
    return len(self.cols)

  def SerializeToString(self):
    """Concatenate each column's serialization, each wrapped in braces."""
    return ''.join('{%s}' % col.SerializeToString() for col in self.cols)
class DataColumn(object):
  """Description of one input column: name, original dtype code, width."""

  def __init__(self):
    self.name = ''
    self.original_type = ''
    self.size = 0

  def SerializeToString(self):
    """Render the column as 'name: N original_type: T size: S'."""
    return 'name: {0} original_type: {1} size: {2}'.format(
        self.name, self.original_type, self.size)
def ParseDataTensorOrDict(data):
  """Return a tensor to use for input data.

  The incoming features can be a dict where keys are the string names of the
  columns, which we turn into a single 2-D tensor.

  Args:
    data: `Tensor` or `dict` of `Tensor` objects.

  Returns:
    A 2-D tensor for input to tensor_forest, a keys tensor for the
    tf.Examples if they exist, and a list of the type of each column
    (e.g. continuous float, categorical).
  """
  data_spec = TensorForestDataSpec()
  if isinstance(data, dict):
    dense_features_size = 0
    dense_features = []
    sparse_features = []
    # Iterate in sorted key order so the column layout is deterministic.
    for k in sorted(data.keys()):
      is_sparse = isinstance(data[k], sparse_tensor.SparseTensor)
      if is_sparse:
        # TODO(gilberth): support sparse categorical.
        if data[k].dtype == dtypes.string:
          logging.info('TensorForest does not support sparse categorical. '
                       'Transform it into a number with hash buckets.')
          continue
        elif data_spec.sparse.size() == 0:
          # All sparse float inputs share one spec entry of unknown width.
          col_spec = data_spec.sparse.add()
          col_spec.original_type = DATA_FLOAT
          col_spec.name = 'all_sparse'
          col_spec.size = -1
        sparse_features.append(
            sparse_tensor.SparseTensor(data[
                k].indices, CastToFloat(data[k].values), data[k].dense_shape))
      else:
        col_spec = data_spec.dense.add()
        col_spec.original_type = DTYPE_TO_FTYPE[data[k].dtype]
        col_spec.name = k
        # the second dimension of get_shape should always be known.
        shape = data[k].get_shape()
        if len(shape) == 1:
          col_spec.size = 1
        else:
          col_spec.size = shape[1].value
        dense_features_size += col_spec.size
        dense_features.append(CastToFloat(data[k]))
    processed_dense_features = None
    processed_sparse_features = None
    if dense_features:
      # Concatenate all dense columns into a single 2-D feature tensor.
      processed_dense_features = array_ops.concat(dense_features, 1)
      data_spec.dense_features_size = dense_features_size
    if sparse_features:
      processed_sparse_features = sparse_ops.sparse_concat(1, sparse_features)
    logging.info(data_spec.SerializeToString())
    return processed_dense_features, processed_sparse_features, data_spec
  elif isinstance(data, sparse_tensor.SparseTensor):
    # A single sparse tensor: everything is sparse, nothing is dense.
    col_spec = data_spec.sparse.add()
    col_spec.name = 'sparse_features'
    col_spec.original_type = DTYPE_TO_FTYPE[data.dtype]
    col_spec.size = -1
    data_spec.dense_features_size = 0
    return None, data, data_spec
  else:
    # A single dense tensor: one dense column spanning all features.
    data = ops.convert_to_tensor(data)
    col_spec = data_spec.dense.add()
    col_spec.name = 'dense_features'
    col_spec.original_type = DTYPE_TO_FTYPE[data.dtype]
    col_spec.size = data.get_shape()[1]
    data_spec.dense_features_size = col_spec.size
    return data, None, data_spec
def ParseLabelTensorOrDict(labels):
  """Return a tensor to use for input labels to tensor_forest.

  The incoming targets can be a dict where keys are the string names of the
  columns, which we turn into a single 1-D tensor for classification or
  2-D tensor for regression.

  Converts sparse tensors to dense ones.

  Args:
    labels: `Tensor` or `dict` of `Tensor` objects.

  Returns:
    A 2-D tensor for labels/outputs.
  """
  if isinstance(labels, dict):
    return math_ops.to_float(
        array_ops.concat(
            [
                # Bug fix: test each value (labels[k]) for sparseness, not
                # the dict itself -- the dict is never a SparseTensor, so
                # sparse values were previously passed through un-densified
                # and array_ops.concat would fail on them.
                sparse_ops.sparse_tensor_to_dense(
                    labels[k], default_value=-1) if isinstance(
                        labels[k], sparse_tensor.SparseTensor) else labels[k]
                for k in sorted(labels.keys())
            ],
            1))
  if isinstance(labels, sparse_tensor.SparseTensor):
    # Missing entries become -1, the conventional "no label" marker here.
    return math_ops.to_float(sparse_ops.sparse_tensor_to_dense(
        labels, default_value=-1))
  return math_ops.to_float(labels)
| apache-2.0 |
auferack08/edx-platform | common/djangoapps/microsite_configuration/middleware.py | 56 | 1212 | """
This file implements the Middleware support for the Open edX platform.
A microsite enables the following features:
1) Mapping of sub-domain name to a 'brand', e.g. foo-university.edx.org
2) Present a landing page with a listing of courses that are specific to the 'brand'
3) Ability to swap out some branding elements in the website
"""
from microsite_configuration import microsite
class MicrositeMiddleware(object):
    """
    Middleware which binds microsite configuration to each request.

    The configuration itself comes from Django settings; this class only
    selects the right configuration based on the request's domain name.
    """

    def process_request(self, request):
        """
        Entry point on every request: associate the request's domain name
        with any corresponding microsite configuration information.
        """
        microsite.clear()
        microsite.set_by_domain(request.META.get('HTTP_HOST', None))
        return None

    def process_response(self, request, response):
        """
        Entry point on request completion: drop the per-request state.
        """
        microsite.clear()
        return response
| agpl-3.0 |
ErasRasmuson/LA | LogAna/Taxi_LongRides_old.py | 1 | 2578 | # -*- coding: cp1252 -*-
"""
###############################################################################
HEADER: Taxi_LongRides.py
AUTHOR: Esa Heikkinen
DATE: 26.06.2018
DOCUMENT: -
VERSION: "$Id$"
REFERENCES: -
PURPOSE:
CHANGES: "$Log$"
###############################################################################
"""
from logdig_analyze_template import *
# ----------------------------- DATA-DRIVEN PART -----------------------------
# Time window of log data to analyse (date and time parts are combined
# into STARTTIME/STOPTIME by start() below).
VARIABLES = {
    "STARTTIME-DATE": "2013-01-01",
    "STARTTIME-TIME": "00:00:00",
    "STOPTIME-DATE": "2013-01-01",
    "STOPTIME-TIME": "01:40:00"
}
# Initial state of the state machine and its entry function.
START = {
    "state": "BEGIN",
    "func": "start"
}
# State BEGIN: scan for ride START events; on a find (TF) go to END,
# on no find (TN) stop normally, on error (TE) stop with error.
ESU["BEGIN"] = {
    "esu_mode": "SEARCH_EVENT:First:NextRow",
    "log_filename_expr": "TaxiRides_small.csv",
    "log_varnames": "isStart=START",
    "log_timecol_name": "startTime",
    "log_start_time_expr": "<STARTTIME-BEGIN>,0",
    "log_stop_time_expr": "<STOPTIME>,0",
    "TF_state": "END",
    "TF_func": "found_begin",
    "TN_state": "STOP",
    "TN_func": "exit_normal",
    "TE_state": "STOP",
    "TE_func": "exit_error",
    "GUI_line_num": "0"
}
# State END: look for the matching END event of the same ride within
# 7200 s (2 h) of its start; either way return to BEGIN to scan on.
ESU["END"] = {
    "esu_mode": "SEARCH_EVENT:First",
    "log_filename_expr": "TaxiRides_small.csv_<SET-RIDEID>",
    "log_varnames": "isStart=END",
    "log_timecol_name": "startTime",
    "log_start_time_expr": "<startTime>,0",
    "log_stop_time_expr": "<startTime>,7200",
    "TF_state": "BEGIN",
    "TF_func": "found_end",
    "TN_state": "BEGIN",
    "TN_func": "not_found_end",
    "TE_state": "STOP",
    "TE_func": "exit_error",
    "GUI_line_num": "1"
}
# No clean-up function is needed on STOP.
STOP = {
    "func": ""
}
# ----------------------------- FUNCTION PART -----------------------------
def start():
    """Initialise the analysis: build the time-window variables, open the
    result (sbk) file and preload/partition the taxi-ride log by rideId."""
    set_datetime_variable("STARTTIME","STARTTIME-DATE","STARTTIME-TIME")
    set_datetime_variable("STOPTIME","STOPTIME-DATE","STOPTIME-TIME")
    set_sbk_file("Taxi_LongRides","SET-RIDEID","startTime","endTime")
    copy_variable("STARTTIME-BEGIN","STARTTIME")
    logfiles_data.read("/home/esa/projects/LA/LogFile/PreProsessed/TaxiRides/TaxiRides_small.csv","startTime")
    logfiles_data.transform_operation_keyby("/home/esa/projects/LA/LogFile/PreProsessed/TaxiRides/TaxiRides_small.csv","rideId")
def found_begin():
    """BEGIN event found: remember which ride it is and when it started."""
    print("found_begin")
    copy_variable("SET-RIDEID","rideId")
    copy_variable("STARTTIME-BEGIN","startTime")
def found_end():
    """Matching END event found within the window: not a long ride."""
    print("found_end")
def not_found_end():
    """No END within the window: record the long ride in the sbk file."""
    print("not_found_end")
    copy_variable("STARTTIME-BEGIN","startTime")
    print_sbk_file()
def exit_normal():
    """Normal termination: no more START events in the time window."""
    print("exit_normal")
def exit_error():
    """Error termination of the state machine."""
    print("exit_error")
| gpl-3.0 |
edlabh/SickRage | lib/requests/packages/chardet/mbcharsetprober.py | 2924 | 3268 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
class MultiByteCharSetProber(CharSetProber):
    """Base prober for multi-byte charsets.

    Combines a coding state machine (is the byte sequence legal?) with a
    character distribution analyzer (is the character frequency plausible?).
    Concrete subclasses assign the two collaborators in their __init__.
    """

    def __init__(self):
        CharSetProber.__init__(self)
        # Set by subclasses to the charset-specific analyzers.
        self._mDistributionAnalyzer = None
        self._mCodingSM = None
        # Last two bytes seen; carries a partial character across feed() calls.
        self._mLastChar = [0, 0]

    def reset(self):
        CharSetProber.reset(self)
        if self._mCodingSM:
            self._mCodingSM.reset()
        if self._mDistributionAnalyzer:
            self._mDistributionAnalyzer.reset()
        self._mLastChar = [0, 0]

    def get_charset_name(self):
        # Abstract: concrete probers return their charset name.
        pass

    def feed(self, aBuf):
        """Feed a chunk of bytes and return the resulting prober state."""
        aLen = len(aBuf)
        for i in range(0, aLen):
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                # Illegal byte sequence for this charset: rule it out.
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # A full character just completed; feed it to the analyzer.
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    # Character started at the tail of the previous chunk.
                    self._mLastChar[1] = aBuf[0]
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)
        # Remember the final byte for the next feed() call.
        self._mLastChar[0] = aBuf[aLen - 1]
        if self.get_state() == constants.eDetecting:
            # Shortcut: commit early once confidence is high enough.
            if (self._mDistributionAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt
        return self.get_state()

    def get_confidence(self):
        return self._mDistributionAnalyzer.get_confidence()
| gpl-3.0 |
FEniCS/ufl | test/test_apply_function_pullbacks.py | 1 | 12156 | #!/usr/bin/env py.test
# -*- coding: utf-8 -*-
from pytest import raises
from ufl import *
from ufl.algorithms.apply_function_pullbacks import apply_function_pullbacks, apply_single_function_pullbacks
from ufl.algorithms.renumbering import renumber_indices
from ufl.classes import Jacobian, JacobianInverse, JacobianDeterminant, ReferenceValue, CellOrientation
def check_single_function_pullback(g, mappings):
    """Assert that the computed pullback of g matches mappings[g].

    Comparison is done on index-renumbered forms; on mismatch, both forms
    and the form signatures are printed to aid debugging.
    """
    expected = mappings[g]
    actual = apply_single_function_pullbacks(g)
    rexp, ract = renumber_indices(expected), renumber_indices(actual)
    if not rexp == ract:
        print()
        print("In check_single_function_pullback:")
        print("input:")
        print(repr(g))
        print("expected:")
        print(str(rexp))
        print("actual:")
        print(str(ract))
        print("signatures:")
        print((expected**2*dx).signature())
        print((actual**2*dx).signature())
        print()
    assert ract == rexp
def test_apply_single_function_pullbacks_triangle3d():
    """Check pullbacks of coefficients on a triangle embedded in 3D.

    Covers identity, contravariant (H(div)) and covariant (H(curl)) Piola
    maps, symmetric-tensor expansion, double Piola maps (Regge/HHJ), and
    their combinations inside mixed elements.
    """
    triangle3d = Cell("triangle", geometric_dimension=3)
    cell = triangle3d
    domain = as_domain(cell)
    UL2 = FiniteElement("DG L2", cell, 1)
    U0 = FiniteElement("DG", cell, 0)
    U = FiniteElement("CG", cell, 1)
    V = VectorElement("CG", cell, 1)
    Vd = FiniteElement("RT", cell, 1)
    Vc = FiniteElement("N1curl", cell, 1)
    T = TensorElement("CG", cell, 1)
    S = TensorElement("CG", cell, 1, symmetry=True)
    COV2T = FiniteElement("Regge", cell, 0)  # (0, 2)-symmetric tensors
    CONTRA2T = FiniteElement("HHJ", cell, 0)  # (2, 0)-symmetric tensors
    Uml2 = UL2*UL2
    Um = U*U
    Vm = U*V
    Vdm = V*Vd
    Vcm = Vd*Vc
    Tm = Vc*T
    Sm = T*S
    Vd0 = Vd*U0  # case from failing ffc demo
    W = S*T*Vc*Vd*V*U
    ul2 = Coefficient(UL2)
    u = Coefficient(U)
    v = Coefficient(V)
    vd = Coefficient(Vd)
    vc = Coefficient(Vc)
    t = Coefficient(T)
    s = Coefficient(S)
    cov2t = Coefficient(COV2T)
    contra2t = Coefficient(CONTRA2T)
    uml2 = Coefficient(Uml2)
    um = Coefficient(Um)
    vm = Coefficient(Vm)
    vdm = Coefficient(Vdm)
    vcm = Coefficient(Vcm)
    tm = Coefficient(Tm)
    sm = Coefficient(Sm)
    vd0m = Coefficient(Vd0)  # case from failing ffc demo
    w = Coefficient(W)
    rul2 = ReferenceValue(ul2)
    ru = ReferenceValue(u)
    rv = ReferenceValue(v)
    rvd = ReferenceValue(vd)
    rvc = ReferenceValue(vc)
    rt = ReferenceValue(t)
    rs = ReferenceValue(s)
    rcov2t = ReferenceValue(cov2t)
    rcontra2t = ReferenceValue(contra2t)
    ruml2 = ReferenceValue(uml2)
    rum = ReferenceValue(um)
    rvm = ReferenceValue(vm)
    rvdm = ReferenceValue(vdm)
    rvcm = ReferenceValue(vcm)
    rtm = ReferenceValue(tm)
    rsm = ReferenceValue(sm)
    rvd0m = ReferenceValue(vd0m)
    rw = ReferenceValue(w)
    # Sanity checks on physical vs reference value sizes (symmetry and the
    # embedded 2D->3D geometry make them differ).
    assert len(w) == 9 + 9 + 3 + 3 + 3 + 1
    assert len(rw) == 6 + 9 + 2 + 2 + 3 + 1
    assert len(w) == 28
    assert len(rw) == 23
    assert len(vd0m) == 4
    assert len(rvd0m) == 3
    # Geometric quantities we need:
    J = Jacobian(domain)
    detJ = JacobianDeterminant(domain)
    Jinv = JacobianInverse(domain)
    # o = CellOrientation(domain)
    i, j, k, l = indices(4)
    # Contravariant H(div) Piola mapping:
    M_hdiv = ((1.0/detJ) * J)  # Not applying cell orientation here
    # Covariant H(curl) Piola mapping: Jinv.T
    # Expected physical values for each coefficient, expressed in terms of
    # its reference value and the geometric mapping.
    mappings = {
        # Simple elements should get a simple representation
        ul2: rul2 / detJ,
        u: ru,
        v: rv,
        vd: as_vector(M_hdiv[i, j]*rvd[j], i),
        vc: as_vector(Jinv[j, i]*rvc[j], i),
        t: rt,
        s: as_tensor([[rs[0], rs[1], rs[2]],
                      [rs[1], rs[3], rs[4]],
                      [rs[2], rs[4], rs[5]]]),
        cov2t: as_tensor(Jinv[k, i] * rcov2t[k, l] * Jinv[l, j], (i, j)),
        contra2t: as_tensor((1.0 / detJ) * (1.0 / detJ)
                            * J[i, k] * rcontra2t[k, l] * J[j, l], (i, j)),
        # Mixed elements become a bit more complicated
        uml2: as_vector([ruml2[0] / detJ, ruml2[1] / detJ]),
        um: rum,
        vm: rvm,
        vdm: as_vector([
            # V
            rvdm[0],
            rvdm[1],
            rvdm[2],
            # Vd
            M_hdiv[0, j]*as_vector([rvdm[3], rvdm[4]])[j],
            M_hdiv[1, j]*as_vector([rvdm[3], rvdm[4]])[j],
            M_hdiv[2, j]*as_vector([rvdm[3], rvdm[4]])[j],
        ]),
        vcm: as_vector([
            # Vd
            M_hdiv[0, j]*as_vector([rvcm[0], rvcm[1]])[j],
            M_hdiv[1, j]*as_vector([rvcm[0], rvcm[1]])[j],
            M_hdiv[2, j]*as_vector([rvcm[0], rvcm[1]])[j],
            # Vc
            Jinv[i, 0]*as_vector([rvcm[2], rvcm[3]])[i],
            Jinv[i, 1]*as_vector([rvcm[2], rvcm[3]])[i],
            Jinv[i, 2]*as_vector([rvcm[2], rvcm[3]])[i],
        ]),
        tm: as_vector([
            # Vc
            Jinv[i, 0]*as_vector([rtm[0], rtm[1]])[i],
            Jinv[i, 1]*as_vector([rtm[0], rtm[1]])[i],
            Jinv[i, 2]*as_vector([rtm[0], rtm[1]])[i],
            # T
            rtm[2], rtm[3], rtm[4],
            rtm[5], rtm[6], rtm[7],
            rtm[8], rtm[9], rtm[10],
        ]),
        sm: as_vector([
            # T
            rsm[0], rsm[1], rsm[2],
            rsm[3], rsm[4], rsm[5],
            rsm[6], rsm[7], rsm[8],
            # S
            rsm[9], rsm[10], rsm[11],
            rsm[10], rsm[12], rsm[13],
            rsm[11], rsm[13], rsm[14],
        ]),
        # Case from failing ffc demo:
        vd0m: as_vector([
            M_hdiv[0, j]*as_vector([rvd0m[0], rvd0m[1]])[j],
            M_hdiv[1, j]*as_vector([rvd0m[0], rvd0m[1]])[j],
            M_hdiv[2, j]*as_vector([rvd0m[0], rvd0m[1]])[j],
            rvd0m[2]
        ]),
        # This combines it all:
        w: as_vector([
            # S
            rw[0], rw[1], rw[2],
            rw[1], rw[3], rw[4],
            rw[2], rw[4], rw[5],
            # T
            rw[6], rw[7], rw[8],
            rw[9], rw[10], rw[11],
            rw[12], rw[13], rw[14],
            # Vc
            Jinv[i, 0]*as_vector([rw[15], rw[16]])[i],
            Jinv[i, 1]*as_vector([rw[15], rw[16]])[i],
            Jinv[i, 2]*as_vector([rw[15], rw[16]])[i],
            # Vd
            M_hdiv[0, j]*as_vector([rw[17], rw[18]])[j],
            M_hdiv[1, j]*as_vector([rw[17], rw[18]])[j],
            M_hdiv[2, j]*as_vector([rw[17], rw[18]])[j],
            # V
            rw[19],
            rw[20],
            rw[21],
            # U
            rw[22],
        ]),
    }
    # Check functions of various elements outside a mixed context
    check_single_function_pullback(ul2, mappings)
    check_single_function_pullback(u, mappings)
    check_single_function_pullback(v, mappings)
    check_single_function_pullback(vd, mappings)
    check_single_function_pullback(vc, mappings)
    check_single_function_pullback(t, mappings)
    check_single_function_pullback(s, mappings)
    check_single_function_pullback(cov2t, mappings)
    check_single_function_pullback(contra2t, mappings)
    # Check functions of various elements inside a mixed context
    check_single_function_pullback(uml2, mappings)
    check_single_function_pullback(um, mappings)
    check_single_function_pullback(vm, mappings)
    check_single_function_pullback(vdm, mappings)
    check_single_function_pullback(vcm, mappings)
    check_single_function_pullback(tm, mappings)
    check_single_function_pullback(sm, mappings)
    # Check the ridiculous mixed element W combining it all
    check_single_function_pullback(w, mappings)
def test_apply_single_function_pullbacks_triangle():
    """Check pullbacks of coefficients on a plain 2D triangle.

    Same structure as the triangle3d test but without the embedded-manifold
    geometry, and without the Regge/HHJ double-Piola cases.
    """
    cell = triangle
    domain = as_domain(cell)
    Ul2 = FiniteElement("DG L2", cell, 1)
    U = FiniteElement("CG", cell, 1)
    V = VectorElement("CG", cell, 1)
    Vd = FiniteElement("RT", cell, 1)
    Vc = FiniteElement("N1curl", cell, 1)
    T = TensorElement("CG", cell, 1)
    S = TensorElement("CG", cell, 1, symmetry=True)
    Uml2 = Ul2*Ul2
    Um = U*U
    Vm = U*V
    Vdm = V*Vd
    Vcm = Vd*Vc
    Tm = Vc*T
    Sm = T*S
    W = S*T*Vc*Vd*V*U
    ul2 = Coefficient(Ul2)
    u = Coefficient(U)
    v = Coefficient(V)
    vd = Coefficient(Vd)
    vc = Coefficient(Vc)
    t = Coefficient(T)
    s = Coefficient(S)
    uml2 = Coefficient(Uml2)
    um = Coefficient(Um)
    vm = Coefficient(Vm)
    vdm = Coefficient(Vdm)
    vcm = Coefficient(Vcm)
    tm = Coefficient(Tm)
    sm = Coefficient(Sm)
    w = Coefficient(W)
    rul2 = ReferenceValue(ul2)
    ru = ReferenceValue(u)
    rv = ReferenceValue(v)
    rvd = ReferenceValue(vd)
    rvc = ReferenceValue(vc)
    rt = ReferenceValue(t)
    rs = ReferenceValue(s)
    ruml2 = ReferenceValue(uml2)
    rum = ReferenceValue(um)
    rvm = ReferenceValue(vm)
    rvdm = ReferenceValue(vdm)
    rvcm = ReferenceValue(vcm)
    rtm = ReferenceValue(tm)
    rsm = ReferenceValue(sm)
    rw = ReferenceValue(w)
    # Physical vs reference value sizes (symmetry shrinks the reference).
    assert len(w) == 4 + 4 + 2 + 2 + 2 + 1
    assert len(rw) == 3 + 4 + 2 + 2 + 2 + 1
    assert len(w) == 15
    assert len(rw) == 14
    # Geometric quantities we need:
    J = Jacobian(domain)
    detJ = JacobianDeterminant(domain)
    Jinv = JacobianInverse(domain)
    i, j, k, l = indices(4)
    # Contravariant H(div) Piola mapping:
    M_hdiv = (1.0/detJ) * J
    # Covariant H(curl) Piola mapping: Jinv.T
    # Expected physical values for each coefficient.
    mappings = {
        # Simple elements should get a simple representation
        ul2: rul2 / detJ,
        u: ru,
        v: rv,
        vd: as_vector(M_hdiv[i, j]*rvd[j], i),
        vc: as_vector(Jinv[j, i]*rvc[j], i),
        t: rt,
        s: as_tensor([[rs[0], rs[1]], [rs[1], rs[2]]]),
        # Mixed elements become a bit more complicated
        uml2: as_vector([ruml2[0] / detJ, ruml2[1] / detJ]),
        um: rum,
        vm: rvm,
        vdm: as_vector([
            # V
            rvdm[0],
            rvdm[1],
            # Vd
            M_hdiv[0, j]*as_vector([rvdm[2], rvdm[3]])[j],
            M_hdiv[1, j]*as_vector([rvdm[2], rvdm[3]])[j],
        ]),
        vcm: as_vector([
            # Vd
            M_hdiv[0, j]*as_vector([rvcm[0], rvcm[1]])[j],
            M_hdiv[1, j]*as_vector([rvcm[0], rvcm[1]])[j],
            # Vc
            Jinv[i, 0]*as_vector([rvcm[2], rvcm[3]])[i],
            Jinv[i, 1]*as_vector([rvcm[2], rvcm[3]])[i],
        ]),
        tm: as_vector([
            # Vc
            Jinv[i, 0]*as_vector([rtm[0], rtm[1]])[i],
            Jinv[i, 1]*as_vector([rtm[0], rtm[1]])[i],
            # T
            rtm[2], rtm[3],
            rtm[4], rtm[5],
        ]),
        sm: as_vector([
            # T
            rsm[0], rsm[1],
            rsm[2], rsm[3],
            # S
            rsm[4], rsm[5],
            rsm[5], rsm[6],
        ]),
        # This combines it all:
        w: as_vector([
            # S
            rw[0], rw[1],
            rw[1], rw[2],
            # T
            rw[3], rw[4],
            rw[5], rw[6],
            # Vc
            Jinv[i, 0]*as_vector([rw[7], rw[8]])[i],
            Jinv[i, 1]*as_vector([rw[7], rw[8]])[i],
            # Vd
            M_hdiv[0, j]*as_vector([rw[9], rw[10]])[j],
            M_hdiv[1, j]*as_vector([rw[9], rw[10]])[j],
            # V
            rw[11],
            rw[12],
            # U
            rw[13],
        ]),
    }
    # Check functions of various elements outside a mixed context
    check_single_function_pullback(ul2, mappings)
    check_single_function_pullback(u, mappings)
    check_single_function_pullback(v, mappings)
    check_single_function_pullback(vd, mappings)
    check_single_function_pullback(vc, mappings)
    check_single_function_pullback(t, mappings)
    check_single_function_pullback(s, mappings)
    # Check functions of various elements inside a mixed context
    check_single_function_pullback(uml2, mappings)
    check_single_function_pullback(um, mappings)
    check_single_function_pullback(vm, mappings)
    check_single_function_pullback(vdm, mappings)
    check_single_function_pullback(vcm, mappings)
    check_single_function_pullback(tm, mappings)
    check_single_function_pullback(sm, mappings)
    # Check the ridiculous mixed element W combining it all
    check_single_function_pullback(w, mappings)
| lgpl-3.0 |
ovnicraft/odoo | addons/portal_project/tests/__init__.py | 260 | 1086 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_access_rights
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
fbradyirl/home-assistant | homeassistant/components/spc/alarm_control_panel.py | 2 | 3187 | """Support for Vanderbilt (formerly Siemens) SPC alarm systems."""
import logging
import homeassistant.components.alarm_control_panel as alarm
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
STATE_ALARM_TRIGGERED,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import DATA_API, SIGNAL_UPDATE_ALARM
_LOGGER = logging.getLogger(__name__)
def _get_alarm_state(area):
    """Get the alarm state."""
    from pyspcwebgw.const import AreaMode

    # A verified alarm overrides whatever mode the area is in.
    if area.verified_alarm:
        return STATE_ALARM_TRIGGERED

    return {
        AreaMode.UNSET: STATE_ALARM_DISARMED,
        AreaMode.PART_SET_A: STATE_ALARM_ARMED_HOME,
        AreaMode.PART_SET_B: STATE_ALARM_ARMED_NIGHT,
        AreaMode.FULL_SET: STATE_ALARM_ARMED_AWAY,
    }.get(area.mode)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the SPC alarm control panel platform."""
    # Only set up via discovery from the main SPC component.
    if discovery_info is None:
        return
    api = hass.data[DATA_API]
    # One alarm panel entity per SPC area.
    async_add_entities([SpcAlarm(area=area, api=api) for area in api.areas.values()])
class SpcAlarm(alarm.AlarmControlPanel):
    """Representation of the SPC alarm panel."""

    def __init__(self, area, api):
        """Initialize the SPC alarm panel."""
        self._area = area
        self._api = api

    async def async_added_to_hass(self):
        """Call for adding new entities."""
        # Update this entity whenever the SPC component signals a change
        # for our area (push model, hence should_poll is False).
        async_dispatcher_connect(
            self.hass, SIGNAL_UPDATE_ALARM.format(self._area.id), self._update_callback
        )

    @callback
    def _update_callback(self):
        """Call update method."""
        self.async_schedule_update_ha_state(True)

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def name(self):
        """Return the name of the device."""
        return self._area.name

    @property
    def changed_by(self):
        """Return the user the last change was triggered by."""
        return self._area.last_changed_by

    @property
    def state(self):
        """Return the state of the device."""
        return _get_alarm_state(self._area)

    async def async_alarm_disarm(self, code=None):
        """Send disarm command."""
        from pyspcwebgw.const import AreaMode
        await self._api.change_mode(area=self._area, new_mode=AreaMode.UNSET)

    async def async_alarm_arm_home(self, code=None):
        """Send arm home command."""
        from pyspcwebgw.const import AreaMode
        await self._api.change_mode(area=self._area, new_mode=AreaMode.PART_SET_A)

    async def async_alarm_arm_night(self, code=None):
        """Send arm night command."""
        from pyspcwebgw.const import AreaMode
        await self._api.change_mode(area=self._area, new_mode=AreaMode.PART_SET_B)

    async def async_alarm_arm_away(self, code=None):
        """Send arm away command."""
        from pyspcwebgw.const import AreaMode
        await self._api.change_mode(area=self._area, new_mode=AreaMode.FULL_SET)
| apache-2.0 |
bubenkoff/Arkestra | vacancies_and_studentships/tests.py | 1 | 22863 | from datetime import datetime, timedelta
from django.test import TestCase
from django.test.client import Client
from django.test.utils import override_settings
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.http import HttpRequest, QueryDict
from cms.api import create_page
from contacts_and_people.models import Person
from models import Vacancy, Studentship
from lister import (
List, VacanciesAndStudentshipsPluginLister, FilterList
)
from contacts_and_people.models import Entity
class VacanciesTests(TestCase):
    """Model-level tests for Vacancy attributes (no views involved)."""

    def setUp(self):
        # Every test needs a client.
        self.client = Client()
        # An unsaved Vacancy fixture, dated 30 days in the future.
        self.toothjob = Vacancy(
            title = "Pulling teeth",
            slug = "pulling-teeth",
            date = datetime.now() + timedelta(days=30),
        )

    def test_generic_attributes(self):
        self.toothjob.save()
        # the item has no informative content
        self.assertEqual(self.toothjob.is_uninformative, True)
        # there are no Entities in the database, so this can't be hosted_by anything
        self.assertEqual(self.toothjob.hosted_by, None)
        # since there are no Entities in the database, default to settings's template
        self.assertEqual(self.toothjob.get_template, settings.CMS_TEMPLATES[0][0])

    def test_date_related_attributes(self):
        self.toothjob.date = datetime(year=2012, month=12, day=12)
        self.assertEqual(self.toothjob.get_when, "December 2012")

    def test_link_to_more(self):
        self.assertEqual(
            self.toothjob.auto_page_view_name,
            "vacancies-and-studentships"
        )
        self.toothjob.hosted_by = Entity(slug="slug")
        self.assertEqual(
            self.toothjob.link_to_more(),
            "/vacancies-and-studentships/slug/"
        )
@override_settings(CMS_TEMPLATES = (('null.html', "Null"),))
class VacanciesItemsViewsTests(TestCase):
    """View tests for a single Vacancy page: 404 when unpublished for
    anonymous users, 200 for staff, and 200 for everyone once published."""

    def setUp(self):
        # Every test needs a client.
        self.client = Client()
        # create a vacancy item
        self.toothjob = Vacancy(
            title = "Pulling teeth",
            slug = "pulling-teeth",
            date = datetime.now() + timedelta(days=30),
        )
        # Staff user used to check admin preview access.
        self.adminuser = User.objects.create_user('arkestra', 'arkestra@example.com', 'arkestra')
        self.adminuser.is_staff=True
        self.adminuser.save()

    # vacancy tests
    def test_unpublished_vacancy_404(self):
        self.toothjob.save()
        # Issue a GET request.
        response = self.client.get('/vacancy/pulling-teeth/')
        # Check that the response is 404 because it's not published
        self.assertEqual(response.status_code, 404)

    def test_unpublished_vacancy_200_for_admin(self):
        self.toothjob.save()
        # log in a staff user
        self.client.login(username='arkestra', password='arkestra')
        response = self.client.get('/vacancy/pulling-teeth/')
        self.assertEqual(response.status_code, 200)

    def test_published_vacancy_200_for_everyone(self):
        self.toothjob.published = True
        self.toothjob.save()
        # Check that the response is 200 OK.
        response = self.client.get('/vacancy/pulling-teeth/')
        self.assertEqual(response.status_code, 200)

    def test_published_vacancy_context(self):
        self.toothjob.published = True
        self.toothjob.save()
        response = self.client.get('/vacancy/pulling-teeth/')
        self.assertEqual(response.context['vacancy'], self.toothjob)
@override_settings(CMS_TEMPLATES = (('null.html', "Null"),))
class StudentshipsItemsViewsTests(TestCase):
    """View tests for a single Studentship page; mirrors the Vacancy
    item-view tests above but for the Studentship model/URL."""

    def setUp(self):
        # Every test needs a client.
        self.client = Client()
        # create a studentship item
        self.toothjob = Studentship(
            title = "Pulling teeth",
            slug = "pulling-teeth",
            date = datetime.now() + timedelta(days=30),
        )
        # Staff user used to check admin preview access.
        self.adminuser = User.objects.create_user('arkestra', 'arkestra@example.com', 'arkestra')
        self.adminuser.is_staff=True
        self.adminuser.save()

    # studentship tests
    def test_unpublished_studentship_404(self):
        self.toothjob.save()
        # Issue a GET request.
        response = self.client.get('/studentship/pulling-teeth/')
        # Check that the response is 404 because it's not published
        self.assertEqual(response.status_code, 404)

    def test_unpublished_studentship_200_for_admin(self):
        self.toothjob.save()
        # log in a staff user
        self.client.login(username='arkestra', password='arkestra')
        response = self.client.get('/studentship/pulling-teeth/')
        self.assertEqual(response.status_code, 200)

    def test_published_studentship_200_for_everyone(self):
        self.toothjob.published = True
        self.toothjob.save()
        # Check that the response is 200 OK.
        response = self.client.get('/studentship/pulling-teeth/')
        self.assertEqual(response.status_code, 200)

    def test_published_studentship_context(self):
        self.toothjob.published = True
        self.toothjob.save()
        response = self.client.get('/studentship/pulling-teeth/')
        self.assertEqual(response.context['studentship'], self.toothjob)
class ReverseURLsTests(TestCase):
    """Check that every named URL pattern reverses to the expected path,
    both with and without an entity slug where applicable."""

    def test_vacancy_reverse_url(self):
        self.assertEqual(
            reverse("vacancy", kwargs={"slug": "tooth-puller"}),
            "/vacancy/tooth-puller/"
        )

    def test_studentship_reverse_url(self):
        self.assertEqual(
            reverse("studentship", kwargs={"slug": "tooth-puller"}),
            "/studentship/tooth-puller/"
        )

    def test_archived_vacancies_base_reverse_url(self):
        self.assertEqual(
            reverse("vacancies-archive"),
            "/archived-vacancies/"
        )

    def test_archived_vacancies_reverse_url(self):
        self.assertEqual(
            reverse("vacancies-archive", kwargs={"slug": "some-slug"}),
            "/archived-vacancies/some-slug/"
        )

    def test_current_vacancies_base_reverse_url(self):
        self.assertEqual(
            reverse("vacancies-current"),
            "/vacancies/"
        )

    def test_current_vacancies_reverse_url(self):
        self.assertEqual(
            reverse("vacancies-current", kwargs={"slug": "some-slug"}),
            "/vacancies/some-slug/"
        )

    def test_archived_studentships_base_reverse_url(self):
        self.assertEqual(
            reverse("studentships-archive"),
            "/archived-studentships/"
        )

    def test_archived_studentships_reverse_url(self):
        self.assertEqual(
            reverse("studentships-archive", kwargs={"slug": "some-slug"}),
            "/archived-studentships/some-slug/"
        )

    def test_current_studentships_base_reverse_url(self):
        self.assertEqual(
            reverse("studentships-current"),
            "/studentships/"
        )

    def test_current_studentships_reverse_url(self):
        self.assertEqual(
            reverse("studentships-current", kwargs={"slug": "some-slug"}),
            "/studentships/some-slug/"
        )

    def test_base_reverse_url(self):
        self.assertEqual(
            reverse("vacancies-and-studentships"),
            "/vacancies-and-studentships/"
        )

    def test_reverse_url(self):
        self.assertEqual(
            reverse("vacancies-and-studentships", kwargs={"slug": "some-slug"}),
            "/vacancies-and-studentships/some-slug/"
        )
@override_settings(CMS_TEMPLATES = (('null.html', "Null"),))
class VacanciesStudentshipsEntityPagesViewsTests(TestCase):
    """Integration tests for the entity vacancies/studentships page URLs.

    Three scenarios are covered:
      * auto vacancies page enabled -> real pages respond 200, bogus 404
      * auto_vacancies_page disabled -> every URL responds 404
      * entity has no website (home page) -> every URL responds 404

    The 36 original tests repeated the same save/GET/assert body; that body
    now lives in the private helpers below. Every original test method name,
    URL and expected status code is preserved.
    """
    def setUp(self):
        # Every test needs a client.
        self.client = Client()
        home_page = create_page(
            "School home page",
            "null.html",
            "en",
            published=True
        )
        self.school = Entity(
            name="School of Medicine",
            slug="medicine",
            auto_vacancies_page=True,
            website=home_page
        )

    def _assert_status(self, url, status_code):
        # Shared assertion: persist the entity, GET the url, check status.
        self.school.save()
        response = self.client.get(url)
        self.assertEqual(response.status_code, status_code)

    def _assert_404_without_auto_page(self, url):
        # Disable the automatic vacancies page before checking.
        self.school.auto_vacancies_page = False
        self._assert_status(url, 404)

    def _assert_404_without_home_page(self, url):
        # Remove the entity's website (home page) before checking.
        self.school.website = None
        self._assert_status(url, 404)

    # entity vacancies and studentships URLs - has vacancies and studentships pages
    def test_main_url(self):
        self._assert_status('/vacancies-and-studentships/', 200)

    def test_entity_url(self):
        self._assert_status('/vacancies-and-studentships/medicine/', 200)

    def test_bogus_entity_url(self):
        self._assert_status('/vacancies-and-studentships/xxxx/', 404)

    def test_main_archive_url(self):
        self._assert_status('/archived-vacancies/', 200)

    def test_entity_vacancies_archive_url(self):
        self._assert_status('/archived-vacancies/medicine/', 200)

    def test_bogus_entity_vacancies_archive_url(self):
        self._assert_status('/archived-vacancies/xxxx/', 404)

    def test_main_archived_studentships_url(self):
        self._assert_status('/archived-studentships/', 200)

    def test_entity_archived_studentships_url(self):
        self._assert_status('/archived-studentships/medicine/', 200)

    def test_bogus_entity_archived_studentships_url(self):
        self._assert_status('/archived-studentships/xxxx/', 404)

    def test_main_all_current_studentships_url(self):
        self._assert_status('/studentships/', 200)

    def test_entity_all_current_studentships_url(self):
        self._assert_status('/studentships/medicine/', 200)

    def test_bogus_entity_all_current_studentships_url(self):
        self._assert_status('/current-studentships/xxx/', 404)

    # entity vacancies and studentships URLs - no vacancies and studentships pages
    def test_no_auto_page_main_url(self):
        self._assert_404_without_auto_page('/vacancies-and-studentships/')

    def test_no_auto_page_entity_url(self):
        self._assert_404_without_auto_page('/vacancies-and-studentships/medicine/')

    def test_no_auto_page_bogus_entity_url(self):
        self._assert_404_without_auto_page('/vacancies-and-studentships/xxxx/')

    def test_no_auto_page_main_archive_url(self):
        self._assert_404_without_auto_page('/archived-vacancies/')

    def test_no_auto_page_entity_vacancies_archive_url(self):
        self._assert_404_without_auto_page('/archived-vacancies/medicine/')

    def test_no_auto_page_bogus_entity_vacancies_archive_url(self):
        self._assert_404_without_auto_page('/archived-vacancies/xxxx/')

    def test_no_auto_page_main_archived_studentships_url(self):
        self._assert_404_without_auto_page('/studentships-archive/')

    def test_no_auto_page_entity_archived_studentships_url(self):
        self._assert_404_without_auto_page('/studentships-archive/medicine/')

    def test_no_auto_page_bogus_entity_archived_studentships_url(self):
        self._assert_404_without_auto_page('/studentships-archive/xxxx/')

    def test_no_auto_page_main_all_current_studentships_url(self):
        self._assert_404_without_auto_page('/current-studentships/')

    def test_no_auto_page_entity_all_current_studentships_url(self):
        self._assert_404_without_auto_page('/current-studentships/medicine/')

    def test_no_auto_page_bogus_entity_all_current_studentships_url(self):
        self._assert_404_without_auto_page('/current-studentships/xxx/')

    # entity vacancies and studentships URLs - no entity home page
    def test_no_entity_home_page_main_url(self):
        self._assert_404_without_home_page('/vacancies-and-studentships/')

    def test_no_entity_home_page_entity_url(self):
        self._assert_404_without_home_page('/vacancies-and-studentships/medicine/')

    def test_no_entity_home_page_bogus_entity_url(self):
        self._assert_404_without_home_page('/vacancies-and-studentships/xxxx/')

    def test_no_entity_home_page_main_archive_url(self):
        self._assert_404_without_home_page('/archived-vacancies/')

    def test_no_entity_home_page_entity_vacancies_archive_url(self):
        self._assert_404_without_home_page('/archived-vacancies/medicine/')

    def test_no_entity_home_page_bogus_entity_vacancies_archive_url(self):
        self._assert_404_without_home_page('/archived-vacancies/xxxx/')

    def test_no_entity_home_page_main_archived_studentships_url(self):
        self._assert_404_without_home_page('/studentships-archive/')

    def test_no_entity_home_page_entity_archived_studentships_url(self):
        self._assert_404_without_home_page('/studentships-archive/medicine/')

    def test_no_entity_home_page_bogus_entity_archived_studentships_url(self):
        self._assert_404_without_home_page('/studentships-archive/xxxx/')

    def test_no_entity_home_page_main_all_current_studentships_url(self):
        self._assert_404_without_home_page('/current-studentships/')

    def test_no_entity_home_page_entity_all_current_studentships_url(self):
        self._assert_404_without_home_page('/current-studentships/medicine/')

    def test_no_entity_home_page_bogus_entity_all_current_studentships_url(self):
        self._assert_404_without_home_page('/current-studentships/xxx/')
class ListTests(TestCase):
    """Tests for the vacancies List builder: ordering, truncation, per-person
    filtering and the "other items" navigation links."""
    def _vacancy(self, title, **extra):
        # Create and save a published, listable Vacancy; *extra* supplies
        # the varying fields (date, importance, slug, summary, ...).
        fields = dict(in_lists=True, published=True)
        fields.update(extra)
        item = Vacancy(title=title, **fields)
        item.save()
        return item

    def setUp(self):
        # Three fixtures: one closing today, one already closed (archived),
        # one closing in the future.
        self.item1 = self._vacancy(
            "closes today, less important",
            date=datetime.now()
        )
        self.item2 = self._vacancy(
            "closed 20 days ago, important",
            summary="a job for today",
            date=datetime.now() - timedelta(days=20),
            importance=3,
            slug="item2"
        )
        self.item3 = self._vacancy(
            "closes in the future",
            date=datetime.now() + timedelta(days=20),
            importance=3,
            slug="item3"
        )
        self.itemlist = List()
        self.itemlist.model = Vacancy
        self.itemlist.items = Vacancy.objects.all()

    def test_all_items_order(self):
        self.assertEqual(
            list(self.itemlist.items),
            [self.item2, self.item1, self.item3]
        )

    def test_reorder_by_importance_date_only(self):
        # check the re-ordered items are not changed
        self.itemlist.re_order_by_importance()
        self.assertEqual(
            list(self.itemlist.items),
            [self.item2, self.item1, self.item3]
        )

    def test_reorder_by_importance_date_makes_no_difference(self):
        # check that items are re-ordered by importance
        self.itemlist.order_by = "importance/date"
        self.itemlist.re_order_by_importance()
        self.assertEqual(
            list(self.itemlist.items),
            [self.item2, self.item1, self.item3]
        )

    def test_truncate_items(self):
        # only the first limit_to items survive truncation
        self.itemlist.limit_to = 1
        self.itemlist.truncate_items()
        self.assertEqual(
            list(self.itemlist.items),
            [self.item2]
        )

    def test_set_items_for_person(self):
        # only items whose please_contact includes the person remain
        p = Person()
        p.save()
        self.item1.please_contact.add(p)
        self.itemlist.person = p
        self.itemlist.set_items_for_person()
        self.assertEqual(
            list(self.itemlist.items),
            [self.item1]
        )

    def test_build(self):
        # build() keeps only the currently-open items
        self.itemlist.build()
        self.assertEqual(list(self.itemlist.items), [self.item1, self.item3])

    def test_other_items(self):
        school = Entity(name="School of Medicine", short_name="Medicine")
        school.save()
        self.itemlist.entity = school
        self.itemlist.other_item_kinds = ["archived", "open", "main"]
        self.itemlist.build()
        # "main" other items are always created; the others need tests to
        # see if any exist
        self.assertEqual(
            self.itemlist.other_items(),
            [{
                'link': '/vacancies-and-studentships/',
                'title': u'Medicine vacancies & studentships',
                'css_class': 'main',
            }]
        )
        # now we save some items
        self.item1.hosted_by = school
        self.item2.hosted_by = school
        self.item3.hosted_by = school
        self.item1.save()
        self.item2.save()
        self.item3.save()
        self.itemlist.build()
        self.assertEqual(list(self.itemlist.items), [self.item1, self.item3])
        self.assertEqual(list(self.itemlist.archived), [self.item2])
        self.assertEqual(
            list(self.itemlist.other_items()),
            [{
                'count': 2,
                'link': '/vacancies/',
                'title': 'All open vacancies'
            },
            {
                'count': 1,
                'link': '/archived-vacancies/',
                'title': 'Archived vacancies'
            },
            {
                'link': '/vacancies-and-studentships/',
                'title': u'Medicine vacancies & studentships',
                'css_class': 'main',
            },
            ]
        )
class FilterListTests(TestCase):
    """Tests for FilterList free-text filtering driven by request.GET."""
    def _vacancy(self, title, **extra):
        # Create and save a published, listable Vacancy; *extra* supplies
        # the varying fields (mirrors the helper in ListTests).
        fields = dict(in_lists=True, published=True)
        fields.update(extra)
        item = Vacancy(title=title, **fields)
        item.save()
        return item

    def setUp(self):
        self.item1 = self._vacancy(
            "closes today, less important",
            date=datetime.now()
        )
        self.item2 = self._vacancy(
            "closed 20 days ago, important",
            summary="a job for today",
            date=datetime.now() - timedelta(days=20),
            importance=3,
            slug="item2"
        )
        self.item3 = self._vacancy(
            "closes in the future",
            date=datetime.now() + timedelta(days=20),
            importance=3,
            slug="item3"
        )
        self.itemlist = FilterList()
        self.itemlist.model = Vacancy
        self.itemlist.request = HttpRequest()

    def test_filter_on_search_terms_no_terms(self):
        # With no "text" parameter, every currently-open item is listed.
        query = QueryDict("")
        self.itemlist.request.GET = query
        self.itemlist.build()
        self.assertEqual(
            list(self.itemlist.items),
            [self.item1, self.item3]
        )

    def test_filter_on_search_terms_1_match(self):
        # "today" matches only item1 among the currently-open items.
        query = QueryDict("text=today")
        self.itemlist.request.GET = query
        self.itemlist.build()
        self.assertEqual(
            list(self.itemlist.items),
            [self.item1]
        )
class PluginListerTests(TestCase):
    # Checks the plugin lister's navigation links for an entity.
    def test_other_items(self):
        """The lister's single "main" link points at the entity's page."""
        lister = VacanciesAndStudentshipsPluginLister(
            entity=Entity(slug="test")
        )
        # assertItemsEqual is Python 2 unittest (order-insensitive compare);
        # under Python 3 the equivalent is assertCountEqual.
        # NOTE(review): the trailing space in 'More ' looks deliberate (the
        # template presumably appends further text) -- confirm before changing.
        self.assertItemsEqual(
            lister.other_items(),
            [{
                'css_class': 'main',
                'link': '/vacancies-and-studentships/test/',
                'title': 'More '
            }]
        )
| bsd-2-clause |
t794104/ansible | lib/ansible/modules/network/fortios/fortios_firewall_local_in_policy6.py | 24 | 11300 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# the lib use python logging can get it if the following is set in your
# Ansible config.
__metaclass__ = type
# Standard Ansible module metadata: community-supported preview module.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_local_in_policy6
short_description: Configure user defined IPv6 local-in policies in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by
allowing the user to configure firewall feature and local_in_policy6 category.
Examples includes all options and need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: true
firewall_local_in_policy6:
description:
- Configure user defined IPv6 local-in policies.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
action:
description:
- Action performed on traffic matching the policy (default = deny).
choices:
- accept
- deny
comments:
description:
- Comment.
dstaddr:
description:
- Destination address object from available options.
suboptions:
name:
description:
- Address name. Source firewall.address6.name firewall.addrgrp6.name.
required: true
intf:
description:
- Incoming interface name from available options. Source system.zone.name system.interface.name.
policyid:
description:
- User defined local in policy ID.
required: true
schedule:
description:
- Schedule object from available options. Source firewall.schedule.onetime.name firewall.schedule.recurring.name firewall.schedule.group
.name.
service:
description:
- Service object from available options. Separate names with a space.
suboptions:
name:
description:
- Service name. Source firewall.service.custom.name firewall.service.group.name.
required: true
srcaddr:
description:
- Source address object from available options.
suboptions:
name:
description:
- Address name. Source firewall.address6.name firewall.addrgrp6.name.
required: true
status:
description:
- Enable/disable this local-in policy.
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure user defined IPv6 local-in policies.
fortios_firewall_local_in_policy6:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
firewall_local_in_policy6:
state: "present"
action: "accept"
comments: "<your_own_value>"
dstaddr:
-
name: "default_name_6 (source firewall.address6.name firewall.addrgrp6.name)"
intf: "<your_own_value> (source system.zone.name system.interface.name)"
policyid: "8"
schedule: "<your_own_value> (source firewall.schedule.onetime.name firewall.schedule.recurring.name firewall.schedule.group.name)"
service:
-
name: "default_name_11 (source firewall.service.custom.name firewall.service.group.name)"
srcaddr:
-
name: "default_name_13 (source firewall.address6.name firewall.addrgrp6.name)"
status: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
    """Open a session on the FortiGate described by *data* via the global fos.

    Enables API debug output and honours the optional 'https' flag
    (defaulting to HTTPS when the key is absent).
    """
    host = data['host']
    username = data['username']
    password = data['password']
    fos.debug('on')
    use_https = data.get('https', True)
    fos.https('on' if use_https else 'off')
    fos.login(host, username, password)
def filter_firewall_local_in_policy6_data(json):
    """Return a copy of *json* restricted to the module's known option keys.

    Keys that are absent from *json* or whose value is None are dropped so
    they are not sent to the FortiGate API. Values that are merely falsy
    (0, '', []) are kept.
    """
    option_list = ['action', 'comments', 'dstaddr',
                   'intf', 'policyid', 'schedule',
                   'service', 'srcaddr', 'status']
    # Dict comprehension replaces the original build-a-dict-in-a-loop.
    return {attribute: json[attribute]
            for attribute in option_list
            if attribute in json and json[attribute] is not None}
def firewall_local_in_policy6(data, fos):
    """Create or delete an IPv6 local-in policy on the FortiGate.

    'present' -> set the (filtered) policy data; 'absent' -> delete by
    policyid. Returns the fortiosapi response dict.
    """
    vdom = data['vdom']
    raw = data['firewall_local_in_policy6']
    payload = filter_firewall_local_in_policy6_data(raw)
    state = raw['state']
    if state == "present":
        return fos.set('firewall',
                       'local-in-policy6',
                       data=payload,
                       vdom=vdom)
    if state == "absent":
        return fos.delete('firewall',
                          'local-in-policy6',
                          mkey=payload['policyid'],
                          vdom=vdom)
def fortios_firewall(data, fos):
    """Log in, apply the requested firewall configuration and log out.

    Returns the standard (is_error, changed, result) triple consumed by
    main().
    """
    login(data)
    # Explicit dispatch table instead of eval() on a method-name string.
    dispatch = {
        'firewall_local_in_policy6': firewall_local_in_policy6,
    }
    resp = None
    for method in dispatch:
        if data[method]:
            resp = dispatch[method](data, fos)
            break
    fos.logout()
    if resp is None:
        # Original code raised NameError ('resp' unbound) when no
        # configuration section was supplied; fail cleanly instead.
        return True, False, {'status': 'error',
                             'message': 'no configuration data provided'}
    return not resp['status'] == "success", resp['status'] == "success", resp
def main():
    """Module entry point: declare the argument spec, validate parameters
    and apply the firewall_local_in_policy6 configuration."""
    # Argument spec mirrors the DOCUMENTATION block above; sub-option
    # validation is delegated to AnsibleModule.
    fields = {
        "host": {"required": True, "type": "str"},
        "username": {"required": True, "type": "str"},
        "password": {"required": False, "type": "str", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "firewall_local_in_policy6": {
            "required": False, "type": "dict",
            "options": {
                "state": {"required": True, "type": "str",
                          "choices": ["present", "absent"]},
                "action": {"required": False, "type": "str",
                           "choices": ["accept", "deny"]},
                "comments": {"required": False, "type": "str"},
                "dstaddr": {"required": False, "type": "list",
                            "options": {
                                "name": {"required": True, "type": "str"}
                            }},
                "intf": {"required": False, "type": "str"},
                "policyid": {"required": True, "type": "int"},
                "schedule": {"required": False, "type": "str"},
                "service": {"required": False, "type": "list",
                            "options": {
                                "name": {"required": True, "type": "str"}
                            }},
                "srcaddr": {"required": False, "type": "list",
                            "options": {
                                "name": {"required": True, "type": "str"}
                            }},
                "status": {"required": False, "type": "str",
                           "choices": ["enable", "disable"]}
            }
        }
    }
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # fortiosapi is imported lazily so a missing dependency produces a clean
    # module failure instead of a traceback at import time.
    try:
        from fortiosapi import FortiOSAPI
    except ImportError:
        module.fail_json(msg="fortiosapi module is required")
    global fos
    fos = FortiOSAPI()
    is_error, has_changed, result = fortios_firewall(module.params, fos)
    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 |
jayceyxc/hue | desktop/core/ext-py/pysaml2-2.4.0/src/saml2/extension/dri.py | 37 | 13082 | #!/usr/bin/env python
#
# Generated Mon Oct 25 16:19:28 2010 by parse_xsd.py version 0.4.
#
import saml2
from saml2 import SamlBase
from saml2 import md
NAMESPACE = 'urn:oasis:names:tc:SAML:2.0:metadata:dri'
class CreationInstant(SamlBase):
    """The urn:oasis:names:tc:SAML:2.0:metadata:dri:CreationInstant element """
    c_tag = 'CreationInstant'
    c_namespace = NAMESPACE
    c_value_type = {'base': 'datetime'}  # element text is a datetime value
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
def creation_instant_from_string(xml_string):
    """Deserialize a CreationInstant element from *xml_string*."""
    return saml2.create_class_from_xml_string(CreationInstant, xml_string)
class SerialNumber(SamlBase):
    """The urn:oasis:names:tc:SAML:2.0:metadata:dri:SerialNumber element """
    c_tag = 'SerialNumber'
    c_namespace = NAMESPACE
    c_value_type = {'base': 'string'}  # element text is a plain string
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
def serial_number_from_string(xml_string):
    """Deserialize a SerialNumber element from *xml_string*."""
    return saml2.create_class_from_xml_string(SerialNumber, xml_string)
class UsagePolicy(SamlBase):
    """The urn:oasis:names:tc:SAML:2.0:metadata:dri:UsagePolicy element """
    c_tag = 'UsagePolicy'
    c_namespace = NAMESPACE
    c_value_type = {'base': 'anyURI'}  # element text is a URI
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
def usage_policy_from_string(xml_string):
    """Deserialize a UsagePolicy element from *xml_string*."""
    return saml2.create_class_from_xml_string(UsagePolicy, xml_string)
class PublisherType_(SamlBase):
    """The urn:oasis:names:tc:SAML:2.0:metadata:dri:PublisherType element """
    c_tag = 'PublisherType'
    c_namespace = NAMESPACE
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    # XML attributes: schema name -> (python name, schema type, required?)
    c_attributes['PublisherID'] = ('publisher_id', 'md:entityIDType', True)
    c_attributes['CreationInstant'] = ('creation_instant', 'datetime', False)
    c_attributes['SerialNumber'] = ('serial_number', 'string', False)
    def __init__(self,
                 publisher_id=None,
                 creation_instant=None,
                 serial_number=None,
                 text=None,
                 extension_elements=None,
                 extension_attributes=None,
                 ):
        """Store the PublisherID/CreationInstant/SerialNumber attributes."""
        SamlBase.__init__(self,
                          text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes,
                          )
        self.publisher_id = publisher_id
        self.creation_instant = creation_instant
        self.serial_number = serial_number
def publisher_type__from_string(xml_string):
    """Deserialize a PublisherType element from *xml_string*."""
    return saml2.create_class_from_xml_string(PublisherType_, xml_string)
class RegistrationAuthority(md.EntityIDType_):
    """The urn:oasis:names:tc:SAML:2.0:metadata:dri:RegistrationAuthority
    element """
    # Concrete element reusing the metadata entityIDType content model.
    c_tag = 'RegistrationAuthority'
    c_namespace = NAMESPACE
    c_children = md.EntityIDType_.c_children.copy()
    c_attributes = md.EntityIDType_.c_attributes.copy()
    c_child_order = md.EntityIDType_.c_child_order[:]
    c_cardinality = md.EntityIDType_.c_cardinality.copy()
def registration_authority_from_string(xml_string):
    """Deserialize a RegistrationAuthority element from *xml_string*."""
    return saml2.create_class_from_xml_string(RegistrationAuthority, xml_string)
class RegistrationInstant(SamlBase):
    """The urn:oasis:names:tc:SAML:2.0:metadata:dri:RegistrationInstant
    element """
    c_tag = 'RegistrationInstant'
    c_namespace = NAMESPACE
    c_value_type = {'base': 'datetime'}  # element text is a datetime value
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
def registration_instant_from_string(xml_string):
    """Deserialize a RegistrationInstant element from *xml_string*."""
    return saml2.create_class_from_xml_string(RegistrationInstant, xml_string)
class RegistrationPolicy(SamlBase):
    """The urn:oasis:names:tc:SAML:2.0:metadata:dri:RegistrationPolicy
    element """
    c_tag = 'RegistrationPolicy'
    c_namespace = NAMESPACE
    c_value_type = {'base': 'anyURI'}  # element text is a URI
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
def registration_policy_from_string(xml_string):
    """Deserialize a RegistrationPolicy element from *xml_string*."""
    return saml2.create_class_from_xml_string(RegistrationPolicy, xml_string)
class Publisher(PublisherType_):
    """The urn:oasis:names:tc:SAML:2.0:metadata:dri:Publisher element """
    # Concrete element sharing PublisherType_'s content model.
    c_tag = 'Publisher'
    c_namespace = NAMESPACE
    c_children = PublisherType_.c_children.copy()
    c_attributes = PublisherType_.c_attributes.copy()
    c_child_order = PublisherType_.c_child_order[:]
    c_cardinality = PublisherType_.c_cardinality.copy()
def publisher_from_string(xml_string):
    """Deserialize a Publisher element from *xml_string*."""
    return saml2.create_class_from_xml_string(Publisher, xml_string)
class RegistrationInfoType_(SamlBase):
    """The urn:oasis:names:tc:SAML:2.0:metadata:dri:RegistrationInfoType
    element """
    c_tag = 'RegistrationInfoType'
    c_namespace = NAMESPACE
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    # Child elements: qualified tag -> (python name, element class)
    c_children[
        '{urn:oasis:names:tc:SAML:2.0:metadata:dri}RegistrationAuthority'] = (
        'registration_authority', RegistrationAuthority)
    c_children[
        '{urn:oasis:names:tc:SAML:2.0:metadata:dri}RegistrationInstant'] = (
        'registration_instant', RegistrationInstant)
    c_children[
        '{urn:oasis:names:tc:SAML:2.0:metadata:dri}RegistrationPolicy'] = (
        'registration_policy', RegistrationPolicy)
    # RegistrationPolicy is optional (0..1) per the schema mapping below.
    c_cardinality['registration_policy'] = {"min": 0, "max": 1}
    c_child_order.extend(['registration_authority', 'registration_instant',
                          'registration_policy'])
    def __init__(self,
                 registration_authority=None,
                 registration_instant=None,
                 registration_policy=None,
                 text=None,
                 extension_elements=None,
                 extension_attributes=None,
                 ):
        """Store the three registration child elements."""
        SamlBase.__init__(self,
                          text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes,
                          )
        self.registration_authority = registration_authority
        self.registration_instant = registration_instant
        self.registration_policy = registration_policy
def registration_info_type__from_string(xml_string):
    """Deserialize a RegistrationInfoType element from *xml_string*."""
    return saml2.create_class_from_xml_string(RegistrationInfoType_, xml_string)
class PublishersType_(SamlBase):
    """The urn:oasis:names:tc:SAML:2.0:metadata:dri:PublishersType element """
    c_tag = 'PublishersType'
    c_namespace = NAMESPACE
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    # Repeated Publisher children (0..n); note the list-wrapped class.
    c_children['{urn:oasis:names:tc:SAML:2.0:metadata:dri}Publisher'] = (
        'publisher', [Publisher])
    c_cardinality['publisher'] = {"min": 0}
    c_child_order.extend(['publisher'])
    def __init__(self,
                 publisher=None,
                 text=None,
                 extension_elements=None,
                 extension_attributes=None,
                 ):
        """Store the (possibly empty) list of Publisher children."""
        SamlBase.__init__(self,
                          text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes,
                          )
        # Falls back to a fresh list when publisher is None/empty.
        self.publisher = publisher or []
def publishers_type__from_string(xml_string):
    """Deserialize a PublishersType element from *xml_string*."""
    return saml2.create_class_from_xml_string(PublishersType_, xml_string)
class RegistrationInfo(RegistrationInfoType_):
    """The urn:oasis:names:tc:SAML:2.0:metadata:dri:RegistrationInfo element """
    # Concrete element sharing RegistrationInfoType_'s content model.
    c_tag = 'RegistrationInfo'
    c_namespace = NAMESPACE
    c_children = RegistrationInfoType_.c_children.copy()
    c_attributes = RegistrationInfoType_.c_attributes.copy()
    c_child_order = RegistrationInfoType_.c_child_order[:]
    c_cardinality = RegistrationInfoType_.c_cardinality.copy()
def registration_info_from_string(xml_string):
    """Deserialize a RegistrationInfo element from *xml_string*."""
    return saml2.create_class_from_xml_string(RegistrationInfo, xml_string)
class Publishers(PublishersType_):
    """The urn:oasis:names:tc:SAML:2.0:metadata:dri:Publishers element """
    # Concrete element sharing PublishersType_'s content model.
    c_tag = 'Publishers'
    c_namespace = NAMESPACE
    c_children = PublishersType_.c_children.copy()
    c_attributes = PublishersType_.c_attributes.copy()
    c_child_order = PublishersType_.c_child_order[:]
    c_cardinality = PublishersType_.c_cardinality.copy()
def publishers_from_string(xml_string):
    """Deserialize a Publishers element from *xml_string*."""
    return saml2.create_class_from_xml_string(Publishers, xml_string)
class DocumentInfoType_(SamlBase):
    """The urn:oasis:names:tc:SAML:2.0:metadata:dri:DocumentInfoType element """
    c_tag = 'DocumentInfoType'
    c_namespace = NAMESPACE
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    # Four optional (0..1) child elements.
    c_children['{urn:oasis:names:tc:SAML:2.0:metadata:dri}CreationInstant'] = (
        'creation_instant', CreationInstant)
    c_cardinality['creation_instant'] = {"min": 0, "max": 1}
    c_children['{urn:oasis:names:tc:SAML:2.0:metadata:dri}SerialNumber'] = (
        'serial_number', SerialNumber)
    c_cardinality['serial_number'] = {"min": 0, "max": 1}
    c_children['{urn:oasis:names:tc:SAML:2.0:metadata:dri}UsagePolicy'] = (
        'usage_policy', UsagePolicy)
    c_cardinality['usage_policy'] = {"min": 0, "max": 1}
    c_children['{urn:oasis:names:tc:SAML:2.0:metadata:dri}Publishers'] = (
        'publishers', Publishers)
    c_cardinality['publishers'] = {"min": 0, "max": 1}
    c_child_order.extend(
        ['creation_instant', 'serial_number', 'usage_policy', 'publishers'])
    def __init__(self,
                 creation_instant=None,
                 serial_number=None,
                 usage_policy=None,
                 publishers=None,
                 text=None,
                 extension_elements=None,
                 extension_attributes=None,
                 ):
        """Store the four optional document-info child elements."""
        SamlBase.__init__(self,
                          text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes,
                          )
        self.creation_instant = creation_instant
        self.serial_number = serial_number
        self.usage_policy = usage_policy
        self.publishers = publishers
def document_info_type__from_string(xml_string):
    """Deserialize a DocumentInfoType element from *xml_string*."""
    return saml2.create_class_from_xml_string(DocumentInfoType_, xml_string)
class DocumentInfo(DocumentInfoType_):
    """The urn:oasis:names:tc:SAML:2.0:metadata:dri:DocumentInfo element """
    # Concrete element sharing DocumentInfoType_'s content model.
    c_tag = 'DocumentInfo'
    c_namespace = NAMESPACE
    c_children = DocumentInfoType_.c_children.copy()
    c_attributes = DocumentInfoType_.c_attributes.copy()
    c_child_order = DocumentInfoType_.c_child_order[:]
    c_cardinality = DocumentInfoType_.c_cardinality.copy()
def document_info_from_string(xml_string):
    """Deserialize a DocumentInfo element from *xml_string*."""
    return saml2.create_class_from_xml_string(DocumentInfo, xml_string)
# Map element tag name -> deserializer function.
ELEMENT_FROM_STRING = {
    DocumentInfo.c_tag: document_info_from_string,
    DocumentInfoType_.c_tag: document_info_type__from_string,
    CreationInstant.c_tag: creation_instant_from_string,
    SerialNumber.c_tag: serial_number_from_string,
    UsagePolicy.c_tag: usage_policy_from_string,
    Publishers.c_tag: publishers_from_string,
    PublishersType_.c_tag: publishers_type__from_string,
    Publisher.c_tag: publisher_from_string,
    PublisherType_.c_tag: publisher_type__from_string,
    RegistrationInfo.c_tag: registration_info_from_string,
    RegistrationInfoType_.c_tag: registration_info_type__from_string,
    RegistrationAuthority.c_tag: registration_authority_from_string,
    RegistrationInstant.c_tag: registration_instant_from_string,
    RegistrationPolicy.c_tag: registration_policy_from_string,
}
# Map element tag name -> element class; used by factory() below.
ELEMENT_BY_TAG = {
    'DocumentInfo': DocumentInfo,
    'DocumentInfoType': DocumentInfoType_,
    'CreationInstant': CreationInstant,
    'SerialNumber': SerialNumber,
    'UsagePolicy': UsagePolicy,
    'Publishers': Publishers,
    'PublishersType': PublishersType_,
    'Publisher': Publisher,
    'PublisherType': PublisherType_,
    'RegistrationInfo': RegistrationInfo,
    'RegistrationInfoType': RegistrationInfoType_,
    'RegistrationAuthority': RegistrationAuthority,
    'RegistrationInstant': RegistrationInstant,
    'RegistrationPolicy': RegistrationPolicy,
}
def factory(tag, **kwargs):
    """Instantiate the element class registered under *tag*.

    Raises KeyError when *tag* is not a known dri element name.
    """
    return ELEMENT_BY_TAG[tag](**kwargs)
| apache-2.0 |
aspidites/django | tests/save_delete_hooks/models.py | 409 | 1030 | """
Adding hooks before/after saving and deleting
To execute arbitrary code around ``save()`` and ``delete()``, just subclass
the methods.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Person(models.Model):
    """Demo model whose save()/delete() overrides record before/after
    markers in ``self.data`` so tests can verify hook ordering."""
    first_name = models.CharField(max_length=20)
    last_name = models.CharField(max_length=20)
    def __init__(self, *args, **kwargs):
        super(Person, self).__init__(*args, **kwargs)
        # Per-instance trace of hook execution; never persisted to the DB.
        self.data = []
    def __str__(self):
        return "%s %s" % (self.first_name, self.last_name)
    def save(self, *args, **kwargs):
        self.data.append("Before save")
        # Call the "real" save() method
        super(Person, self).save(*args, **kwargs)
        self.data.append("After save")
    def delete(self):
        self.data.append("Before deletion")
        # Call the "real" delete() method
        super(Person, self).delete()
        self.data.append("After deletion")
nrejack/research-subject-mapper | bin/utils/emailsender.py | 2 | 2560 | import smtplib, os, logging
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email.Utils import COMMASPACE, formatdate
from email import Encoders
class EmailProps():
    """
    Plain Old Data Storage class used for passing email properties.

    :param host: SMTP server host name or address
    :param port: SMTP server port
    :param sender: "From" address
    :param to_addr_list: list of "To" addresses
    :param cc_addr_list: optional list of "Cc" addresses
    :param subject: message subject line
    :param msg_body: plain-text message body
    :param attach_files: optional list of file paths to attach
    """
    def __init__(self,
            host,
            port,
            sender,
            to_addr_list,
            cc_addr_list = None,
            subject = '',
            msg_body = '',
            attach_files = None):
        self.host = host
        self.port = port
        self.sender = sender
        self.to_addr_list = to_addr_list
        # Default the optional lists per-instance: the previous mutable
        # defaults ([]) in the signature were shared across every instance,
        # so a caller appending to one props object polluted all others.
        self.cc_addr_list = cc_addr_list if cc_addr_list is not None else []
        self.subject = subject
        self.msg_body = msg_body
        self.attach_files = attach_files if attach_files is not None else []
class EmailSender():
    '''
    This class is a helper for sending emails
    '''
    def __init__(self):
        pass
    def send(self, props):
        '''
        Build and send the email described by `props` (an EmailProps).

        Every file listed in props.attach_files is attached; if one cannot
        be read the error is logged and the exception re-raised.

        :return: True if the message was handed to the SMTP server,
            False if the SMTP transaction failed.
        '''
        assert isinstance(props.to_addr_list, list)
        recipients = ",".join(props.to_addr_list)
        msg = MIMEMultipart()
        msg['From'] = props.sender
        msg['To'] = recipients
        msg['Cc'] = ",".join(props.cc_addr_list)
        msg['Date'] = formatdate(localtime=True)
        msg['Subject'] = props.subject
        logging.info("Message body: " + props.msg_body)
        msg.attach( MIMEText(props.msg_body) )
        try :
            for f in props.attach_files :
                logging.info("Adding email attachment: %s" % f)
                part = MIMEBase('application', "octet-stream")
                # Close the handle even if read() fails (the previous code
                # leaked it when an error occurred between open and close).
                fh = open(f, "rb")
                try:
                    part.set_payload( fh.read() )
                finally:
                    fh.close()
                Encoders.encode_base64(part)
                part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(f))
                msg.attach(part)
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are not intercepted; the failure is still logged and re-raised.
            logging.error('Unable to open file: %s' % f)
            raise
        try:
            smtpObj = smtplib.SMTP(props.host, props.port)
            smtpObj.sendmail(props.sender, recipients, msg.as_string())
            smtpObj.close()
            logging.info("Success sending email to: %s " % recipients)
        except Exception as e:
            # `except ... as e` works on both Python 2.6+ and Python 3,
            # unlike the old `except Exception, e` form.
            logging.error("Error sending email to: %s - %s" % (recipients, str(e)))
            return False
        return True
| bsd-3-clause |
1orwell/yrs2013 | fake.py | 1 | 3440 | '''Generate necessary dump files'''
#options
size = 100
regenerate_graph = False
days = 1
force_layout = False
default = str(size)+'.dat'
###
import igraph, pickle, random, os
import math
from collections import OrderedDict
def process(fout):
    """Build the <coords, movement> pickle for a sampled contact graph.

    Loads (or builds and caches in dump.dat) the full flu-data contact
    graph, samples it down to the module-level `size`, lays the nodes out,
    and writes {'coords': ..., 'movement': ...} to data/<fout>.

    Relies on module-level options: size, force_layout.
    """
    output = os.path.join('data',fout)
    try:
        #load graph if previously generated.
        g = pickle.load(open('dump.dat'))
        print 'Graph loaded from dump.dat'
    except IOError:
        #generate graph if it does not exist in the directory
        print 'Generating graph to dump.dat'
        g = igraph.Graph()
        g.add_vertices(791)
        g.es["weight"] = 1.0
        g.delete_vertices([0])
        with open('./flu-data/edgeLists/durationCondition/addThenChop/dropoff=0/minimumDuration=1/deltaT=1620/staticWeightedEdgeList_at=1350_min=540_max=2159.txt') as edges:
            for edge in edges:
                u, v, w = map(int, edge.split())
                # Edge weight is the inverse of the recorded contact weight.
                g[u, v] = 1.0/w
        # Drop isolated nodes before caching.
        g.delete_vertices(g.vs(_degree_eq = 0))
        pickle.dump(g,open('dump.dat','wb'))
        print 'Finished'
    #take sample of n points
    # Deleting (790 - size) random vertices leaves ~size nodes.
    sample = random.sample(range(1,788),790-size)
    g.delete_vertices(sample)
    print g.summary()
    #Fiddle layout
    print 'Working out layout'
    if force_layout:
        #starting everyone at their own location
        #coords definition stolen from sim_group_move.py
        coords = []
        wrap = 10 #positions per row
        # NOTE(review): size/wrap is integer division on Python 2, so
        # col_length can undercount when size is not a multiple of wrap --
        # confirm this is intended.
        col_length = int(math.ceil(size/wrap))
        for y in range(col_length):
            for x in range(wrap):
                coords.append((x,y))
        print coords
        centre = (wrap/2, col_length/2)
    else:
        # Force-directed layout; centre used to order nodes from the middle out.
        l = g.layout_kamada_kawai()
        centre = l.centroid()
        coords = l.coords
    def distance(x, y): return math.sqrt((x[0] - y[0])**2 + (x[1] - y[1])**2)
    #sort the coords by their position from the centre
    order = sorted(enumerate(coords), key = lambda x: distance(x[1], centre))
    order = [x[0] for x in order]
    #work out mininum global time
    mintime = 1000 #must be less than this
    # First timestamp on each mote file; the overall minimum rebases times to ~1.
    for x in order:
        if x == 0: continue
        with open('./flu-data/moteFiles/node-'+str(x)) as fin:
            line = fin.readline()
            if line:
                t = int(line.split()[-1])
                if t < mintime:
                    mintime = t
    completed = []
    times = {}
    print 'Generating movement file'
    # For each node (centre-out order), build an OrderedDict of
    # time -> position; a contact with an already-processed node moves this
    # node to that node's most recent position at or before the contact time.
    for node in order:
        if node == 0: continue
        times[node] = OrderedDict({0 : node})
        node_name = 'node-'+str(node)
        f = open('./flu-data/moteFiles/'+node_name, 'r')
        for contact in f:
            line = map(int, contact.split())
            contact_id = line[0]
            time = (line[-1] - mintime + 1)
            if contact_id in completed:
                current_max = 0
                current_time = -1
                # Latest known position of the contact no later than `time`.
                for t, pos in times[contact_id].items():
                    if current_time < t <= time:
                        current_max = pos
                        current_time = t
                position = current_max
                times[node][time] = position
        completed.append(node)
        f.close()
    print 'Writing movement file'
    out = {'coords': coords, 'movement': times}
    pickle.dump(out, open(output, 'wb'))
if __name__ == '__main__':
    # Build the movement dump for the module-level default dataset name.
    process(default)
| mit |
frankk00/realtor | common/display.py | 34 | 7057 | # Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def prep_stream(stream, actors):
  """Attach the owning actor object to *stream* and return it."""
  owner_ref = actors[stream.owner]
  stream.owner_ref = owner_ref
  return stream
def prep_entry(entry, streams, actors):
  """Attach the stream, owner and actor objects referenced by *entry*."""
  stream_ref = streams[entry.stream]
  entry.stream_ref = stream_ref
  owner_ref = actors[entry.owner]
  entry.owner_ref = owner_ref
  actor_ref = actors[entry.actor]
  entry.actor_ref = actor_ref
  return entry
def prep_entry_entry(entry, entries):
  """Return *entry* unchanged.

  NOTE(review): linking entry.entry_ref to the referenced sibling entry is
  currently disabled -- the original body returned before ever reaching:
      if entry.entry:
        entry.entry_ref = entries[entry.entry]
  The unreachable code has been removed; behavior is unchanged. Re-enable
  the lookup deliberately if entry_ref is needed again.
  """
  return entry
def prep_stream_dict(stream_dict, actors):
  """Prep every stream in *stream_dict*; returns {key: prepped stream}."""
  prepped = {}
  for key, stream in stream_dict.items():
    prepped[key] = prep_stream(stream, actors)
  return prepped
def prep_entry_list(entry_list, streams, actors):
  """Prep each entry, then link entries that reference sibling entries."""
  prepped = [prep_entry(e, streams, actors) for e in entry_list]
  by_keyname = dict((e.key().name(), e) for e in prepped)
  return [prep_entry_entry(e, by_keyname) for e in prepped]
def prep_comment(comment, actors):
  """Attach the owner and actor objects referenced by *comment*."""
  owner_ref = actors[comment.owner]
  comment.owner_ref = owner_ref
  actor_ref = actors[comment.actor]
  comment.actor_ref = actor_ref
  return comment
def prep_comment_list(comment_list, actors):
  """Prep every comment in *comment_list*; returns the prepped list."""
  return [prep_comment(c, actors) for c in comment_list]
DEFAULT_AVATARS = [{'name': 'animal_%s' % i, 'path': 'default/animal_%s' % i}
for i in xrange(1,17)]
ICONS = {
'101': ('feed-blog', 'blog', ''),
'102': ('feed-bookmark', 'bookmark', ''),
'103': ('feed-bookwish', 'book wish', ''),
'104': ('feed-event', 'event', ''),
'105': ('feed-music', 'music', ''),
'106': ('feed-photo', 'photo', ''),
'107': ('feed-places', 'places', ''),
'108': ('feed-atom', 'atom', ''),
'109': ('feed-video', 'video', ''),
'200': ('jaiku-presence', 'presence', ''),
'201': ('jaiku-comment', 'comment', ''),
'202': ('jaiku-message', 'message', ''),
'203': ('jaiku-new-user', 'new user', ''),
'204': ('jaiku-sms', 'sms', ''),
'205': ('jaiku-contact-added', 'contact added', ''),
'300': ('web-speechbubble', 'speech bubble', ''),
'301': ('web-car', 'car', ''),
'302': ('web-alarmclock', 'alarm clock', ''),
'303': ('web-loudspeaker', 'loudspeaker', ''),
'304': ('web-tram', 'tram', ''),
'305': ('web-casette', 'casette', ''),
'306': ('web-underware', 'underwear', ''),
'307': ('web-rollerblade', 'rollerblade', ''),
'308': ('web-uzi', 'uzi', ''),
'309': ('web-scoop', 'scoop', ''),
'310': ('web-bomb', 'bomb', ''),
'311': ('web-bra', 'bra', ''),
'312': ('web-videotape', 'videotape', ''),
'313': ('web-cigarettes', 'cigarettes', ''),
'314': ('web-vinyl', 'vinyl', ''),
'315': ('web-champaign', 'champaign', ''),
'316': ('web-airplain', 'airport', ''),
'317': ('web-bus', 'bus', ''),
'318': ('web-grumpy', 'grumpy', ''),
'319': ('web-coffee', 'coffee', ''),
'320': ('web-camera', 'camera', ''),
'321': ('web-basketball', 'basketball', ''),
'322': ('web-beer', 'beer', ''),
'323': ('web-binoculars', 'binoculars', ''),
'324': ('web-boiler', 'boiler', ''),
'325': ('web-walk', 'walk', ''),
'326': ('web-wallclock', 'wallclock', ''),
'327': ('web-trashcan', 'trashcan', ''),
'328': ('web-tv', 'tv', ''),
'329': ('web-computing', 'computer', ''),
'330': ('web-videocamera', 'videocamera', ''),
'331': ('web-game', 'game', ''),
'332': ('web-cone', 'cone', ''),
'333': ('web-driller', 'driller', ''),
'334': ('web-popcorn', 'popcorn', ''),
'335': ('web-playshirt', 'play', ''),
'336': ('web-disc', 'disc', ''),
'337': ('web-event', 'event', ''),
'338': ('web-exclamationmark', 'exclamationmark', ''),
'339': ('web-football', 'football', ''),
'340': ('web-footballshoe', 'football shoe', ''),
'341': ('web-eat', 'fork', ''),
'342': ('web-gameboy', 'gameboy', ''),
'343': ('web-grenade', 'grenade', ''),
'344': ('web-hand', 'hand', ''),
'345': ('web-hanger', 'hanger', ''),
'346': ('web-hearingprotector', 'ear muffs', ''),
'347': ('web-love', 'love', ''),
'348': ('web-balloons', 'balloons', ''),
'349': ('web-clock', 'clock', ''),
'350': ('web-barrier', 'barrier', ''),
'351': ('web-laptop', 'laptop', ''),
'352': ('web-megaphone', 'megaphone', ''),
'353': ('web-microwave', 'microwave', ''),
'354': ('web-book', 'book', ''),
'355': ('web-middlefinger', 'middle finger', ''),
'356': ('web-notes', 'notes', ''),
'357': ('web-question', 'question', ''),
'358': ('web-rollator', 'rollator', ''),
'359': ('web-shuttlecock', 'shuttlecock', ''),
'360': ('web-salt', 'salt', ''),
'361': ('web-scull', 'scull', ''),
'362': ('web-sk8', 'sk8', ''),
'363': ('web-sleep', 'leep', ''),
'364': ('web-snorkeling', 'snorkeling', ''),
'365': ('web-snowflake', 'snowflake', ''),
'366': ('web-soda', 'soda', ''),
'367': ('web-song', 'song', ''),
'368': ('web-spraycan', 'spray', ''),
'369': ('web-sticks', 'sticks', ''),
'370': ('web-storm', 'storm', ''),
'371': ('web-straitjacket', 'straitjacket', ''),
'372': ('web-metro', 'metro', ''),
'373': ('web-luggage', 'luggage', ''),
'374': ('web-sun', 'sun', ''),
'375': ('web-taxi', 'taxi', ''),
'376': ('web-technics', 'technics', ''),
'377': ('web-toaster', 'toaster', ''),
'378': ('web-train', 'train', ''),
'379': ('web-wheelchair', 'wheelchair', ''),
'380': ('web-zippo', 'zippo', ''),
'381': ('web-icecream', 'ice cream', ''),
'382': ('web-movie', 'movie', ''),
'383': ('web-makeup', 'makeup', ''),
'384': ('web-bandaid', 'bandaid', ''),
'385': ('web-wine', 'wine', ''),
'386': ('web-clean', 'clean', ''),
'387': ('web-blading', 'blading', ''),
'388': ('web-bike', 'bike', ''),
'389': ('web-pils', 'pils', ''),
'390': ('web-picnic', 'picnic', ''),
'391': ('web-lifejacket', 'lifejacket', ''),
'392': ('web-home', 'home', ''),
'393': ('web-happy', 'happy', ''),
'394': ('web-toiletpaper', 'toiletpaper', ''),
'395': ('web-theatre', 'theatre', ''),
'396': ('web-shop', 'shop', ''),
'397': ('web-search', 'search', ''),
'398': ('web-cloudy', 'cloudy', ''),
'399': ('web-hurry', 'Hurry', ''),
'400': ('web-morning', 'Morning', ''),
'401': ('web-car', 'Car', ''),
'402': ('web-baby-boy', 'Itsaboy', ''),
'403': ('web-baby-girl', 'Itsagirl', ''),
}
ICONS_BY_ID = dict([(v[0], v) for k, v in ICONS.iteritems()])
SELECTABLE_ICONS = dict([(k, v) for k, v in ICONS.iteritems()
if k > '300'])
del SELECTABLE_ICONS['340']
del SELECTABLE_ICONS['351']
del SELECTABLE_ICONS['401']
del SELECTABLE_ICONS['403']
| bsd-3-clause |
iulian787/spack | var/spack/repos/builtin/packages/swftools/package.py | 5 | 1166 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Swftools(AutotoolsPackage):
    """SWFTools is a collection of utilities for working with Adobe Flash files
    (SWF files). The tool collection includes programs for reading SWF files,
    combining them, and creating them from other content (like images, sound
    files, videos or sourcecode). SWFTools is released under the GPL.
    """
    homepage = "http://swftools.org"
    url = "http://swftools.org/swftools-0.9.2.tar.gz"
    version('0.9.2', sha256='bf6891bfc6bf535a1a99a485478f7896ebacbe3bbf545ba551298080a26f01f1')
    # Local build fixes shipped alongside this package recipe.
    patch('configure.patch')
    patch('swfs_Makefile.in.patch')
    # Patch from the Arch AUR fixing the build against giflib >= 5.1;
    # level=0 because the patch paths are relative to the source root.
    patch('https://aur.archlinux.org/cgit/aur.git/plain/giflib-5.1.patch?h=swftools',
          sha256='6a995dfd674c5954f5b967e3d45d6845a186872fcaa4223d725902fd4d679f1b',
          level=0)
    depends_on('giflib')
    depends_on('lame')
    depends_on('poppler')
    depends_on('freetype')
    depends_on('jpeg')
    depends_on('fftw')
| lgpl-2.1 |
xin3liang/platform_external_chromium-trace | trace-viewer/third_party/closure_linter/closure_linter/requireprovidesorter_test.py | 135 | 2357 | #!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for RequireProvideSorter."""
import unittest as googletest
from closure_linter import ecmametadatapass
from closure_linter import javascripttokenizer
from closure_linter import javascripttokens
from closure_linter import requireprovidesorter
# pylint: disable-msg=C6409
TokenType = javascripttokens.JavaScriptTokenType
class RequireProvideSorterTest(googletest.TestCase):
  """Tests for RequireProvideSorter."""
  # Shared (class-level) tokenizer and metadata pass; the metadata pass is
  # Reset() before each use.
  _tokenizer = javascripttokenizer.JavaScriptTokenizer()
  _metadata_pass = ecmametadatapass.EcmaMetaDataPass()
  def testFixRequires_removeBlankLines(self):
    """Tests that blank lines are omitted in sorted goog.require statements."""
    input_lines = [
        'goog.provide(\'package.subpackage.Whatever\');',
        '',
        'goog.require(\'package.subpackage.ClassB\');',
        '',
        'goog.require(\'package.subpackage.ClassA\');'
    ]
    expected_lines = [
        'goog.provide(\'package.subpackage.Whatever\');',
        '',
        'goog.require(\'package.subpackage.ClassA\');',
        'goog.require(\'package.subpackage.ClassB\');'
    ]
    token = self._tokenizer.TokenizeFile(input_lines)
    self._metadata_pass.Reset()
    self._metadata_pass.Process(token)
    sorter = requireprovidesorter.RequireProvideSorter()
    sorter.FixRequires(token)
    self.assertEquals(expected_lines, self._GetLines(token))
  def _GetLines(self, token):
    """Returns an array of lines based on the specified token stream."""
    lines = []
    line = ''
    while token:
      line += token.string
      if token.IsLastInLine():
        lines.append(line)
        line = ''
      token = token.next
    return lines
if __name__ == '__main__':
  # Allow running this test module directly.
  googletest.main()
| bsd-3-clause |
Ms2ger/servo | tests/wpt/css-tests/tools/html5lib/html5lib/tests/test_encoding.py | 445 | 2228 | from __future__ import absolute_import, division, unicode_literals
import os
import unittest
try:
unittest.TestCase.assertEqual
except AttributeError:
unittest.TestCase.assertEqual = unittest.TestCase.assertEquals
from .support import get_data_files, TestData, test_dir, errorMessage
from html5lib import HTMLParser, inputstream
class Html5EncodingTestCase(unittest.TestCase):
    """Sanity checks for inputstream.codecName() encoding-name normalization."""
    def test_codec_name_a(self):
        self.assertEqual(inputstream.codecName("utf-8"), "utf-8")
    def test_codec_name_b(self):
        self.assertEqual(inputstream.codecName("utf8"), "utf-8")
    def test_codec_name_c(self):
        # Surrounding whitespace is stripped during normalization.
        self.assertEqual(inputstream.codecName(" utf8 "), "utf-8")
    def test_codec_name_d(self):
        # Label aliasing: ISO-8859-1 variants map to windows-1252 per HTML5.
        self.assertEqual(inputstream.codecName("ISO_8859--1"), "windows-1252")
def runParserEncodingTest(data, encoding):
    # Full-parse detection with chardet disabled: only the pre-scan and
    # in-document declarations may determine the encoding.
    p = HTMLParser()
    assert p.documentEncoding is None
    p.parse(data, useChardet=False)
    encoding = encoding.lower().decode("ascii")
    assert encoding == p.documentEncoding, errorMessage(data, encoding, p.documentEncoding)
def runPreScanEncodingTest(data, encoding):
    # Byte-level pre-scan only (no full parse, no chardet).
    stream = inputstream.HTMLBinaryInputStream(data, chardet=False)
    encoding = encoding.lower().decode("ascii")
    # Very crude way to ignore irrelevant tests
    if len(data) > stream.numBytesMeta:
        # Declaration lies beyond the pre-scan window; nothing to check.
        return
    assert encoding == stream.charEncoding[0], errorMessage(data, encoding, stream.charEncoding[0])
def test_encoding():
    # Nose-style test generator: yields one parser test and one pre-scan
    # test per case in each "encoding" fixture file.
    for filename in get_data_files("encoding"):
        tests = TestData(filename, b"data", encoding=None)
        for idx, test in enumerate(tests):
            yield (runParserEncodingTest, test[b'data'], test[b'encoding'])
            yield (runPreScanEncodingTest, test[b'data'], test[b'encoding'])
# Only define the chardet-based test when a detector library is importable;
# charade is preferred, chardet is the fallback.
try:
    try:
        import charade # flake8: noqa
    except ImportError:
        import chardet # flake8: noqa
except ImportError:
    print("charade/chardet not found, skipping chardet tests")
else:
    def test_chardet():
        # Byte sniffing should identify this fixture as Big5.
        with open(os.path.join(test_dir, "encoding" , "chardet", "test_big5.txt"), "rb") as fp:
            encoding = inputstream.HTMLInputStream(fp.read()).charEncoding
            assert encoding[0].lower() == "big5"
| mpl-2.0 |
shtouff/django | django/conf/locale/id/formats.py | 504 | 2135 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j N Y'
DATETIME_FORMAT = "j N Y, G.i"
TIME_FORMAT = 'G.i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd-m-Y'
SHORT_DATETIME_FORMAT = 'd-m-Y G.i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d-%m-%y', '%d/%m/%y', # '25-10-09', 25/10/09'
'%d-%m-%Y', '%d/%m/%Y', # '25-10-2009', 25/10/2009'
'%d %b %Y', # '25 Oct 2006',
'%d %B %Y', # '25 October 2006'
]
TIME_INPUT_FORMATS = [
'%H.%M.%S', # '14.30.59'
'%H.%M', # '14.30'
]
DATETIME_INPUT_FORMATS = [
'%d-%m-%Y %H.%M.%S', # '25-10-2009 14.30.59'
'%d-%m-%Y %H.%M.%S.%f', # '25-10-2009 14.30.59.000200'
'%d-%m-%Y %H.%M', # '25-10-2009 14.30'
'%d-%m-%Y', # '25-10-2009'
'%d-%m-%y %H.%M.%S', # '25-10-09' 14.30.59'
'%d-%m-%y %H.%M.%S.%f', # '25-10-09' 14.30.59.000200'
'%d-%m-%y %H.%M', # '25-10-09' 14.30'
'%d-%m-%y', # '25-10-09''
'%m/%d/%y %H.%M.%S', # '10/25/06 14.30.59'
'%m/%d/%y %H.%M.%S.%f', # '10/25/06 14.30.59.000200'
'%m/%d/%y %H.%M', # '10/25/06 14.30'
'%m/%d/%y', # '10/25/06'
'%m/%d/%Y %H.%M.%S', # '25/10/2009 14.30.59'
'%m/%d/%Y %H.%M.%S.%f', # '25/10/2009 14.30.59.000200'
'%m/%d/%Y %H.%M', # '25/10/2009 14.30'
'%m/%d/%Y', # '10/25/2009'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause |
wangyum/spark | python/pyspark/resource/profile.py | 22 | 7113 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.resource.requests import TaskResourceRequest, TaskResourceRequests, \
ExecutorResourceRequests, ExecutorResourceRequest
class ResourceProfile(object):
    """
    Resource profile to associate with an RDD. A :class:`pyspark.resource.ResourceProfile`
    allows the user to specify executor and task requirements for an RDD that will get
    applied during a stage. This allows the user to change the resource requirements between
    stages. This is meant to be immutable so user cannot change it after building.
    .. versionadded:: 3.1.0
    Notes
    -----
    This API is evolving.
    """
    def __init__(self, _java_resource_profile=None, _exec_req=None, _task_req=None):
        self._java_resource_profile = _java_resource_profile
        if _java_resource_profile is None:
            # Python-side request dicts are only tracked while no JVM-backed
            # profile exists.
            self._executor_resource_requests = _exec_req or {}
            self._task_resource_requests = _task_req or {}
    @property
    def id(self):
        if self._java_resource_profile is None:
            raise RuntimeError("SparkContext must be created to get the id, get the id "
                               "after adding the ResourceProfile to an RDD")
        return self._java_resource_profile.id()
    @property
    def taskResources(self):
        """Task requests as a {resource name: TaskResourceRequest} dict."""
        if self._java_resource_profile is None:
            return self._task_resource_requests
        jmap = self._java_resource_profile.taskResourcesJMap()
        return {name: TaskResourceRequest(req.resourceName(), req.amount())
                for name, req in jmap.items()}
    @property
    def executorResources(self):
        """Executor requests as a {resource name: ExecutorResourceRequest} dict."""
        if self._java_resource_profile is None:
            return self._executor_resource_requests
        jmap = self._java_resource_profile.executorResourcesJMap()
        return {name: ExecutorResourceRequest(req.resourceName(), req.amount(),
                                              req.discoveryScript(), req.vendor())
                for name, req in jmap.items()}
class ResourceProfileBuilder(object):
    """
    Resource profile Builder to build a resource profile to associate with an RDD.
    A ResourceProfile allows the user to specify executor and task requirements for
    an RDD that will get applied during a stage. This allows the user to change the
    resource requirements between stages.
    .. versionadded:: 3.1.0
    Notes
    -----
    This API is evolving.
    """
    def __init__(self):
        from pyspark.context import SparkContext
        _jvm = SparkContext._jvm
        if _jvm is not None:
            # A SparkContext exists: delegate all state to the JVM builder.
            self._jvm = _jvm
            self._java_resource_profile_builder = \
                _jvm.org.apache.spark.resource.ResourceProfileBuilder()
        else:
            # No JVM yet: accumulate requests in plain Python dicts.
            self._jvm = None
            self._java_resource_profile_builder = None
        self._executor_resource_requests = {}
        self._task_resource_requests = {}
    def require(self, resourceRequest):
        # Merge a TaskResourceRequests or ExecutorResourceRequests bundle
        # into this builder; returns self so calls can be chained.
        if isinstance(resourceRequest, TaskResourceRequests):
            if self._java_resource_profile_builder is not None:
                if resourceRequest._java_task_resource_requests is not None:
                    self._java_resource_profile_builder.require(
                        resourceRequest._java_task_resource_requests)
                else:
                    # Request was created before the JVM started: re-wrap it
                    # against the live JVM before delegating.
                    taskReqs = TaskResourceRequests(self._jvm, resourceRequest.requests)
                    self._java_resource_profile_builder.require(
                        taskReqs._java_task_resource_requests)
            else:
                self._task_resource_requests.update(resourceRequest.requests)
        else:
            if self._java_resource_profile_builder is not None:
                if resourceRequest._java_executor_resource_requests is not None:
                    self._java_resource_profile_builder.require(
                        resourceRequest._java_executor_resource_requests)
                else:
                    execReqs = ExecutorResourceRequests(self._jvm, resourceRequest.requests)
                    self._java_resource_profile_builder.require(
                        execReqs._java_executor_resource_requests)
            else:
                self._executor_resource_requests.update(resourceRequest.requests)
        return self
    def clearExecutorResourceRequests(self):
        # Drop every executor-level request accumulated so far.
        if self._java_resource_profile_builder is not None:
            self._java_resource_profile_builder.clearExecutorResourceRequests()
        else:
            self._executor_resource_requests = {}
    def clearTaskResourceRequests(self):
        # Drop every task-level request accumulated so far.
        if self._java_resource_profile_builder is not None:
            self._java_resource_profile_builder.clearTaskResourceRequests()
        else:
            self._task_resource_requests = {}
    @property
    def taskResources(self):
        # Snapshot of task requests as {resource name: TaskResourceRequest}.
        if self._java_resource_profile_builder is not None:
            taskRes = self._java_resource_profile_builder.taskResourcesJMap()
            result = {}
            for k, v in taskRes.items():
                result[k] = TaskResourceRequest(v.resourceName(), v.amount())
            return result
        else:
            return self._task_resource_requests
    @property
    def executorResources(self):
        # Snapshot of executor requests as {name: ExecutorResourceRequest}.
        if self._java_resource_profile_builder is not None:
            result = {}
            execRes = self._java_resource_profile_builder.executorResourcesJMap()
            for k, v in execRes.items():
                result[k] = ExecutorResourceRequest(v.resourceName(), v.amount(),
                                                   v.discoveryScript(), v.vendor())
            return result
        else:
            return self._executor_resource_requests
    @property
    def build(self):
        # NOTE(review): exposed as a property (builder.build, no parentheses);
        # kept as-is since existing callers rely on attribute-style access.
        if self._java_resource_profile_builder is not None:
            jresourceProfile = self._java_resource_profile_builder.build()
            return ResourceProfile(_java_resource_profile=jresourceProfile)
        else:
            return ResourceProfile(_exec_req=self._executor_resource_requests,
                                   _task_req=self._task_resource_requests)
| apache-2.0 |
abenzbiria/clients_odoo | openerp/tools/float_utils.py | 151 | 9267 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import math
def _float_check_precision(precision_digits=None, precision_rounding=None):
assert (precision_digits is not None or precision_rounding is not None) and \
not (precision_digits and precision_rounding),\
"exactly one of precision_digits and precision_rounding must be specified"
if precision_digits is not None:
return 10 ** -precision_digits
return precision_rounding
def float_round(value, precision_digits=None, precision_rounding=None):
"""Return ``value`` rounded to ``precision_digits``
decimal digits, minimizing IEEE-754 floating point representation
errors, and applying HALF-UP (away from zero) tie-breaking rule.
Precision must be given by ``precision_digits`` or ``precision_rounding``,
not both!
:param float value: the value to round
:param int precision_digits: number of fractional digits to round to.
:param float precision_rounding: decimal number representing the minimum
non-zero value at the desired precision (for example, 0.01 for a
2-digit precision).
:return: rounded float
"""
rounding_factor = _float_check_precision(precision_digits=precision_digits,
precision_rounding=precision_rounding)
if rounding_factor == 0 or value == 0: return 0.0
# NORMALIZE - ROUND - DENORMALIZE
# In order to easily support rounding to arbitrary 'steps' (e.g. coin values),
# we normalize the value before rounding it as an integer, and de-normalize
# after rounding: e.g. float_round(1.3, precision_rounding=.5) == 1.5
# TIE-BREAKING: HALF-UP
# We want to apply HALF-UP tie-breaking rules, i.e. 0.5 rounds away from 0.
# Due to IEE754 float/double representation limits, the approximation of the
# real value may be slightly below the tie limit, resulting in an error of
# 1 unit in the last place (ulp) after rounding.
# For example 2.675 == 2.6749999999999998.
# To correct this, we add a very small epsilon value, scaled to the
# the order of magnitude of the value, to tip the tie-break in the right
# direction.
# Credit: discussion with OpenERP community members on bug 882036
normalized_value = value / rounding_factor # normalize
epsilon_magnitude = math.log(abs(normalized_value), 2)
epsilon = 2**(epsilon_magnitude-53)
normalized_value += cmp(normalized_value,0) * epsilon
rounded_value = round(normalized_value) # round to integer
result = rounded_value * rounding_factor # de-normalize
return result
def float_is_zero(value, precision_digits=None, precision_rounding=None):
    """Return True if ``value`` is indistinguishable from zero at the given
    precision, i.e. strictly smaller than the corresponding *epsilon* once
    rounded.

    Give exactly one of ``precision_digits`` (epsilon is ``10**-digits``) or
    ``precision_rounding`` (used directly as the epsilon), never both.

    Warning: ``float_is_zero(value1 - value2)`` is not equivalent to
    ``float_compare(value1, value2) == 0``: the former rounds after
    subtracting while the latter rounds each operand first, which differs
    for e.g. 0.006 and 0.002 at 2-digit precision.

    :param float value: value to compare with the precision's zero
    :return: True if ``value`` is considered zero
    """
    epsilon = _float_check_precision(precision_digits=precision_digits,
                                     precision_rounding=precision_rounding)
    rounded = float_round(value, precision_rounding=epsilon)
    return abs(rounded) < epsilon
def float_compare(value1, value2, precision_digits=None, precision_rounding=None):
    """Round both values at the given precision, then compare the results.

    Two values compare equal iff they round to the same number -- which is
    not the same as their difference rounding to zero! At 2-digit precision
    0.006 and 0.002 round to 0.01 and 0.0 and therefore compare as
    different (returns 1), even though 0.006 - 0.002 = 0.004 would round to
    zero. Likewise 1.432 and 1.431 compare equal (returns 0).

    Give exactly one of ``precision_digits`` or ``precision_rounding``,
    never both.

    :param float value1: first value to compare
    :param float value2: second value to compare
    :return: -1, 0 or 1 if ``value1`` is (resp.) lower than, equal to, or
        greater than ``value2`` at the given precision.
    """
    rounding_factor = _float_check_precision(precision_digits=precision_digits,
                                             precision_rounding=precision_rounding)
    first = float_round(value1, precision_rounding=rounding_factor)
    second = float_round(value2, precision_rounding=rounding_factor)
    delta = first - second
    if float_is_zero(delta, precision_rounding=rounding_factor):
        return 0
    return 1 if delta > 0.0 else -1
def float_repr(value, precision_digits):
    """Format ``value`` with exactly ``precision_digits`` fractional digits.

    This is purely a string-representation helper -- rounding itself is the
    job of :meth:`~.float_round`. ``str()`` is unsuitable here because it
    has an intrinsic limit of ~12 significant digits, e.g.
    str(123456789.1234) == str(123456789.123), losing precision.

    :param int precision_digits: number of fractional digits to include
        in the output
    """
    # "%.*f" reads the precision from the first tuple element.
    return "%.*f" % (precision_digits, value)
if __name__ == "__main__":
    # Self-test harness: sweeps signed values across 7 orders of magnitude
    # and checks float_round against hand-computed expected strings.
    import time
    start = time.time()
    count = 0
    errors = 0
    def try_round(amount, expected, precision_digits=3):
        # Round, format, and compare against the expected string; counts
        # every call and every mismatch via the module-level tallies.
        global count, errors; count += 1
        result = float_repr(float_round(amount, precision_digits=precision_digits),
                            precision_digits=precision_digits)
        if result != expected:
            errors += 1
            print '###!!! Rounding error: got %s , expected %s' % (result, expected)
    # Extended float range test, inspired by Cloves Almeida's test on bug #882036.
    fractions = [.0, .015, .01499, .675, .67499, .4555, .4555, .45555]
    expecteds = ['.00', '.02', '.01', '.68', '.67', '.46', '.456', '.4556']
    precisions = [2, 2, 2, 2, 2, 2, 3, 4]
    for magnitude in range(7):
        for i in xrange(len(fractions)):
            frac, exp, prec = fractions[i], expecteds[i], precisions[i]
            for sign in [-1,1]:
                for x in xrange(0,10000,97):
                    n = x * 10**magnitude
                    f = sign * (n + frac)
                    f_exp = ('-' if f != 0 and sign == -1 else '') + str(n) + exp
                    try_round(f, f_exp, precision_digits=prec)
    stop = time.time()
    # Micro-bench results:
    # 47130 round calls in 0.422306060791 secs, with Python 2.6.7 on Core i3 x64
    # with decimal:
    # 47130 round calls in 6.612248100021 secs, with Python 2.6.7 on Core i3 x64
    print count, " round calls, ", errors, "errors, done in ", (stop-start), 'secs'
| agpl-3.0 |
leiferikb/bitpop | build/third_party/twisted_10_2/twisted/words/test/test_basesupport.py | 55 | 3066 | # Copyright (c) 2001-2006 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.trial import unittest
from twisted.words.im import basesupport
from twisted.internet import error, defer
class DummyAccount(basesupport.AbstractAccount):
    """
    An account object that will do nothing when asked to start to log on.
    Instead of opening a real connection, _startLogOn hands back a Deferred
    that the test case fires manually; class-level flags record which of the
    framework callbacks were invoked.
    """
    loginHasFailed = False  # becomes True once _loginFailed has run
    loginCallbackCalled = False  # becomes True once _cb_logOn has run
    def _startLogOn(self, *args):
        """
        Set self.loginDeferred to the same as the deferred returned, allowing a
        testcase to .callback or .errback.
        @return: A deferred.
        """
        self.loginDeferred = defer.Deferred()
        return self.loginDeferred
    def _loginFailed(self, result):
        # Record the failure before delegating to the real implementation.
        self.loginHasFailed = True
        return basesupport.AbstractAccount._loginFailed(self, result)
    def _cb_logOn(self, result):
        # Record the success before delegating to the real implementation.
        self.loginCallbackCalled = True
        return basesupport.AbstractAccount._cb_logOn(self, result)
class DummyUI(object):
    """
    Minimal stand-in for the UI object handed to AbstractAccount.logOn:
    it implements only registerAccountClient and remembers whether that
    registration happened.
    """

    # Flips to True (as an instance attribute) on registration.
    clientRegistered = False

    def registerAccountClient(self, result):
        self.clientRegistered = True
class ClientMsgTests(unittest.TestCase):
    """
    Tests for the log-on behaviour of the abstract account support code,
    driven through DummyAccount/DummyUI.
    """
    def makeUI(self):
        # Fresh dummy UI for each test.
        return DummyUI()

    def makeAccount(self):
        # Fresh dummy account; the constructor arguments are placeholders.
        return DummyAccount('la', False, 'la', None, 'localhost', 6667)

    def test_connect(self):
        """
        Test that account.logOn works, and it calls the right callback when a
        connection is established.
        """
        account = self.makeAccount()
        ui = self.makeUI()
        d = account.logOn(ui)
        account.loginDeferred.callback(None)
        def check(result):
            self.assertFalse(account.loginHasFailed,
                             "Login shouldn't have failed")
            self.assertTrue(account.loginCallbackCalled,
                            "We should be logged in")
        d.addCallback(check)
        return d

    def test_failedConnect(self):
        """
        Test that account.logOn calls the right errback when the connection
        attempt fails, and that nothing gets registered with the UI.
        """
        account = self.makeAccount()
        ui = self.makeUI()
        d = account.logOn(ui)
        account.loginDeferred.errback(Exception())
        def err(reason):
            self.assertTrue(account.loginHasFailed, "Login should have failed")
            self.assertFalse(account.loginCallbackCalled,
                             "We shouldn't be logged in")
            self.assertFalse(ui.clientRegistered,
                             "Client shouldn't be registered in the UI")
        cb = lambda r: self.fail("Shouldn't get called back")
        d.addCallbacks(cb, err)
        return d

    def test_alreadyConnecting(self):
        """
        Test that it can fail sensibly when someone tried to connect before
        we did.
        """
        account = self.makeAccount()
        ui = self.makeUI()
        account.logOn(ui)
        self.assertRaises(error.ConnectError, account.logOn, ui)
scwhitehouse/rose | lib/python/rose/scheme_handler.py | 2 | 5011 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2012-7 Met Office.
#
# This file is part of Rose, a framework for meteorological suites.
#
# Rose is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Rose is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rose. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
"""Load and select from a group of related functional classes."""
from glob import glob
import inspect
import os
import sys
class SchemeHandlersManager(object):
    """Load and select from a group of related functional classes."""

    # Name of the optional per-handler method used by guess_handler.
    CAN_HANDLE = "can_handle"

    def __init__(self, paths, ns=None, attrs=None, can_handle=None,
                 *args, **kwargs):
        """Load modules in paths and initialise any classes with a SCHEME.

        If "ns" is not None, only modules under the specified name-space in
        paths are searched and imported. ("ns" should be a str in the form
        "a.b", which will be converted as "a/b" for path search.)

        Initialise each handler, and save it in self.handlers, which is a
        dict of {scheme: handler, ...}.

        If attrs is specified, it should be a list of attribute names; any
        candidate class for which one of these attributes is None (or
        missing) is skipped. If attrs is None, no filtering is done.
        (Bug fix: previously attrs=None raised TypeError as soon as a
        candidate class was found, because None was iterated.)

        args and kwargs are passed as *args, **kwargs to the constructor of
        each class. This manager is passed via kwargs["manager"].

        Each handler class may have a SCHEMES attribute (a list of str) or a
        SCHEME attribute with a str value, which will be used as the keys to
        self.handlers of this manager.

        Optionally, a handler may have a h.can_handle(scheme, **kwargs)
        method that returns a boolean to indicate whether it can handle a
        given value.
        """
        self.handlers = {}
        if can_handle is None:
            can_handle = self.CAN_HANDLE
        self.can_handle = can_handle
        if attrs is None:
            attrs = []  # no attribute filtering
        cwd = os.getcwd()
        ns_path = ""
        if ns:
            ns_path = os.path.join(*(ns.split("."))) + os.sep
        for path in paths:
            os.chdir(path)  # assuming that "" is at the front of sys.path
            sys.path.insert(0, path)
            try:
                kwargs["manager"] = self
                for file_name in glob(ns_path + "*.py"):
                    # skip package/internal files such as __init__.py
                    if file_name.startswith("__"):
                        continue
                    mod_path = file_name[0:-3]
                    mod_name = mod_path.replace(os.sep, ".")
                    mod = __import__(mod_name, fromlist=[""])
                    members = inspect.getmembers(mod, inspect.isclass)
                    # A module with a single class defaults its scheme to the
                    # module's base name.
                    scheme0_default = None
                    if len(members) == 1:
                        scheme0_default = os.path.basename(mod_path)
                    for key, c in members:
                        if any([getattr(c, a, None) is None for a in attrs]):
                            continue
                        handler = None
                        scheme0 = getattr(c, "SCHEME", scheme0_default)
                        schemes = []
                        if scheme0 is not None:
                            schemes = [scheme0]
                        for scheme in getattr(c, "SCHEMES", schemes):
                            if scheme in self.handlers:
                                raise ValueError(c)  # scheme already used
                            kwargs["manager"] = self
                            # Instantiate lazily, once per class.
                            if handler is None:
                                handler = c(*args, **kwargs)
                            self.handlers[scheme] = handler
            finally:
                os.chdir(cwd)
                sys.path.pop(0)

    def get_handler(self, scheme):
        """Return the handler with a matching scheme.

        Return None if there is no handler with a matching scheme (or if
        scheme is unhashable).
        """
        try:
            return self.handlers[scheme]
        except (KeyError, TypeError):
            pass

    def guess_handler(self, item):
        """Return a handler that can handle item.

        Return None if there is no handler that can handle item.
        """
        handler = self.get_handler(item)
        if handler:
            return handler
        for handler in self.handlers.values():
            can_handle = getattr(handler, self.can_handle, None)
            if (callable(can_handle) and can_handle(item)):
                return handler
| gpl-3.0 |
pamfilos/invenio | modules/bibrank/lib/bibrank_tag_based_indexer.py | 13 | 20833 | # -*- coding: utf-8 -*-
## Ranking of records using different parameters and methods.
## This file is part of Invenio.
## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import sys
import time
import traceback
import ConfigParser
from invenio.config import \
CFG_SITE_LANG, \
CFG_ETCDIR
from invenio.search_engine import perform_request_search
from invenio.bibrank_citation_indexer import get_citation_weight, print_missing
from invenio.bibrank_downloads_indexer import *
from invenio.dbquery import run_sql, serialize_via_marshal, deserialize_via_marshal, \
wash_table_column_name, get_table_update_time
from invenio.bibtask import task_get_option, write_message, task_sleep_now_if_required
from invenio.bibindex_engine import create_range_list
from invenio.intbitset import intbitset
options = {}
def download_weight_filtering_user_repair_exec ():
    """Repair download weight filtering user ranking method.
    No repair procedure is defined for this method, so this is a no-op."""
    write_message("Repairing for this ranking method is not defined. Skipping.")
    return
def download_weight_total_repair_exec():
    """Repair download weight total ranking method.
    No repair procedure is defined for this method, so this is a no-op."""
    write_message("Repairing for this ranking method is not defined. Skipping.")
    return
def file_similarity_by_times_downloaded_repair_exec():
    """Repair file similarity by times downloaded ranking method.
    No repair procedure is defined for this method, so this is a no-op."""
    write_message("Repairing for this ranking method is not defined. Skipping.")
    return
def single_tag_rank_method_repair_exec():
    """Repair single tag ranking method.
    No repair procedure is defined for this method, so this is a no-op."""
    write_message("Repairing for this ranking method is not defined. Skipping.")
    return
def citation_exec(rank_method_code, name, config):
    """Rank method for citation analysis.
    For cmd "print-missing", only prints records missing citation info;
    otherwise recomputes citation weights and stores them via intoDB."""
    #first check if this is a specific task
    if task_get_option("cmd") == "print-missing":
        num = task_get_option("num")
        print_missing(num)
    else:
        # dic maps recid -> citation weight; a falsy dic means nothing changed
        dic, index_update_time = get_citation_weight(rank_method_code, config)
        if dic:
            if task_get_option("id") or task_get_option("collection") or \
                   task_get_option("modified"):
                # user have asked to citation-index specific records
                # only, so we should not update citation indexer's
                # last run time stamp information
                index_update_time = None
            intoDB(dic, index_update_time, rank_method_code)
        else:
            write_message("No need to update the indexes for citations.")
def download_weight_filtering_user(run):
    # BibTask entry point: delegates to the generic tag-based engine.
    return bibrank_engine(run)
def download_weight_total(run):
    # BibTask entry point: delegates to the generic tag-based engine.
    return bibrank_engine(run)
def file_similarity_by_times_downloaded(run):
    # BibTask entry point: delegates to the generic tag-based engine.
    return bibrank_engine(run)
def download_weight_filtering_user_exec (rank_method_code, name, config):
    """Ranking by number of downloads per User.
    Only one full Text Download is taken in account for one
    specific userIP address"""
    begin_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())  # new "last updated" stamp
    time1 = time.time()
    dic = fromDB(rank_method_code)  # previously stored weights
    last_updated = get_lastupdated(rank_method_code)
    keys = new_downloads_to_index(last_updated)  # download events since the last run
    filter_downloads_per_hour(keys, last_updated)
    dic = get_download_weight_filtering_user(dic, keys)  # merge new events into the weights
    intoDB(dic, begin_date, rank_method_code)
    time2 = time.time()
    return {"time":time2-time1}
def download_weight_total_exec(rank_method_code, name, config):
    """rankink by total number of downloads without check the user ip
    if users downloads 3 time the same full text document it has to be count as 3 downloads"""
    begin_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())  # new "last updated" stamp
    time1 = time.time()
    dic = fromDB(rank_method_code)  # previously stored weights
    last_updated = get_lastupdated(rank_method_code)
    keys = new_downloads_to_index(last_updated)  # download events since the last run
    filter_downloads_per_hour(keys, last_updated)
    dic = get_download_weight_total(dic, keys)  # merge new events into the weights
    intoDB(dic, begin_date, rank_method_code)
    time2 = time.time()
    return {"time":time2-time1}
def file_similarity_by_times_downloaded_exec(rank_method_code, name, config):
    """update dictionnary {recid:[(recid, nb page similarity), ()..]}"""
    begin_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())  # new "last updated" stamp
    time1 = time.time()
    dic = fromDB(rank_method_code)  # previously stored similarity data
    last_updated = get_lastupdated(rank_method_code)
    keys = new_downloads_to_index(last_updated)  # download events since the last run
    filter_downloads_per_hour(keys, last_updated)
    dic = get_file_similarity_by_times_downloaded(dic, keys)
    intoDB(dic, begin_date, rank_method_code)
    time2 = time.time()
    return {"time":time2-time1}
def single_tag_rank_method_exec(rank_method_code, name, config):
    """Creating the rank method data"""
    begin_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())  # new "last updated" stamp
    rnkset = {}
    rnkset_old = fromDB(rank_method_code)  # previously stored ranking data
    rnkset_new = single_tag_rank(config)  # freshly computed values for the configured range
    # new values override old ones for the same recid
    rnkset = union_dicts(rnkset_old, rnkset_new)
    intoDB(rnkset, begin_date, rank_method_code)
def single_tag_rank(config):
    """Connect the given tag with the data from the kb file given.
    Reads a knowledgebase file of "value---weight" lines, then for every
    record in options["recid_range"] that carries the configured tag (and
    all mandatory tags, if any), maps recid -> float weight."""
    write_message("Loading knowledgebase file", verbose=9)
    kb_data = {}
    records = []
    write_message("Reading knowledgebase file: %s" % \
                   config.get(config.get("rank_method", "function"), "kb_src"))
    input = open(config.get(config.get("rank_method", "function"), "kb_src"), 'r')
    data = input.readlines()
    for line in data:
        # lines starting with "#" are comments; others are "key---value"
        if not line[0:1] == "#":
            kb_data[string.strip((string.split(string.strip(line), "---"))[0])] = (string.split(string.strip(line), "---"))[1]
    write_message("Number of lines read from knowledgebase file: %s" % len(kb_data))
    tag = config.get(config.get("rank_method", "function"), "tag")
    tags = config.get(config.get("rank_method", "function"), "check_mandatory_tags").split(", ")
    if tags == ['']:
        tags = ""
    records = []
    for (recids, recide) in options["recid_range"]:
        task_sleep_now_if_required(can_stop_too=True)
        write_message("......Processing records #%s-%s" % (recids, recide))
        recs = run_sql("SELECT id_bibrec, value FROM bib%sx, bibrec_bib%sx WHERE tag=%%s AND id_bibxxx=id and id_bibrec >=%%s and id_bibrec<=%%s" % (tag[0:2], tag[0:2]), (tag, recids, recide))
        # start from "all records" and intersect with each mandatory tag
        valid = intbitset(trailing_bits=1)
        valid.discard(0)
        for key in tags:
            newset = intbitset()
            newset += [recid[0] for recid in (run_sql("SELECT id_bibrec FROM bib%sx, bibrec_bib%sx WHERE id_bibxxx=id AND tag=%%s AND id_bibxxx=id and id_bibrec >=%%s and id_bibrec<=%%s" % (tag[0:2], tag[0:2]), (key, recids, recide)))]
            valid.intersection_update(newset)
        if tags:
            recs = filter(lambda x: x[0] in valid, recs)
        records = records + list(recs)
    write_message("Number of records found with the necessary tags: %s" % len(records))
    # keep only records belonging to collections enabled for this method
    records = filter(lambda x: x[0] in options["validset"], records)
    rnkset = {}
    for key, value in records:
        if kb_data.has_key(value):
            if not rnkset.has_key(key):
                rnkset[key] = float(kb_data[value])
            else:
                # NOTE(review): rnkset[key] is a float here, so indexing it
                # with [1] (and using it as a kb_data key) looks like a
                # latent bug in the "keep the larger weight" branch --
                # confirm the intended behaviour before touching it.
                if kb_data.has_key(rnkset[key]) and float(kb_data[value]) > float((rnkset[key])[1]):
                    rnkset[key] = float(kb_data[value])
        else:
            # tag value not present in the knowledgebase: weight 0
            rnkset[key] = 0
    write_message("Number of records available in rank method: %s" % len(rnkset))
    return rnkset
def get_lastupdated(rank_method_code):
    """Get the last time the rank method was updated.
    Raises if the method has never been registered/run."""
    res = run_sql("SELECT rnkMETHOD.last_updated FROM rnkMETHOD WHERE name=%s", (rank_method_code, ))
    if res:
        return res[0][0]
    else:
        raise Exception("Is this the first run? Please do a complete update.")
def intoDB(dic, date, rank_method_code):
    """Insert the rank method data into the database.
    Replaces any previous rnkMETHODDATA row for the method; `date` (may be
    falsy to skip) becomes the method's new last_updated timestamp."""
    mid = run_sql("SELECT id from rnkMETHOD where name=%s", (rank_method_code, ))
    del_rank_method_codeDATA(rank_method_code)  # replace, don't accumulate rows
    serdata = serialize_via_marshal(dic)
    midstr = str(mid[0][0])
    run_sql("INSERT INTO rnkMETHODDATA(id_rnkMETHOD, relevance_data) VALUES (%s,%s)", (midstr, serdata,))
    if date:
        run_sql("UPDATE rnkMETHOD SET last_updated=%s WHERE name=%s", (date, rank_method_code))
def fromDB(rank_method_code):
    """Get the data for a rank method.
    Returns the stored dict, or {} when no data exists yet."""
    id = run_sql("SELECT id from rnkMETHOD where name=%s", (rank_method_code, ))
    res = run_sql("SELECT relevance_data FROM rnkMETHODDATA WHERE id_rnkMETHOD=%s", (id[0][0], ))
    if res:
        return deserialize_via_marshal(res[0][0])
    else:
        return {}
def del_rank_method_codeDATA(rank_method_code):
    """Delete the data for a rank method from rnkMETHODDATA."""
    id = run_sql("SELECT id from rnkMETHOD where name=%s", (rank_method_code, ))
    run_sql("DELETE FROM rnkMETHODDATA WHERE id_rnkMETHOD=%s", (id[0][0], ))
def del_recids(rank_method_code, range_rec):
    """Delete some records from the rank method.
    range_rec is a list of (start, end) recid pairs; the pruned dict is
    written back with a fresh timestamp."""
    id = run_sql("SELECT id from rnkMETHOD where name=%s", (rank_method_code, ))
    res = run_sql("SELECT relevance_data FROM rnkMETHODDATA WHERE id_rnkMETHOD=%s", (id[0][0], ))
    if res:
        rec_dict = deserialize_via_marshal(res[0][0])
        write_message("Old size: %s" % len(rec_dict))
        for (recids, recide) in range_rec:
            # NOTE(review): range() excludes recide itself, so the upper
            # bound of each pair is never deleted -- confirm this is intended.
            for i in range(int(recids), int(recide)):
                if rec_dict.has_key(i):
                    del rec_dict[i]
        write_message("New size: %s" % len(rec_dict))
        begin_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        intoDB(rec_dict, begin_date, rank_method_code)
    else:
        write_message("Create before deleting!")
def union_dicts(dict1, dict2):
    """Return the union of the two dicts as a new dict.

    Entries from dict2 override entries from dict1 on key collisions,
    exactly as the original pairwise-copy implementation did.
    """
    # dict() + update() replaces the two hand-rolled iteritems() copy loops
    # (which also tied the function to Python 2 only).
    union_dict = dict(dict1)
    union_dict.update(dict2)
    return union_dict
def rank_method_code_statistics(rank_method_code):
    """Print statistics about the stored data of a rank method.

    Shows the method name, last run time, record count, lowest/highest
    values and a 10-bucket histogram of the value distribution.
    """
    method = fromDB(rank_method_code)
    # Bug fix: the old code seeded min/max with tuples like ('', -999999);
    # under Python 2 cross-type ordering a number never compares greater
    # than a tuple, so the maximum was never found (and the builtins
    # min/max were shadowed as well). Use numeric sentinels instead.
    max_value = -999999
    maxcount = 0
    min_value = 999999
    mincount = 0
    for (recID, value) in method.iteritems():
        # lowest strictly-positive value
        if value < min_value and value > 0:
            min_value = value
        if value > max_value:
            max_value = value
    for (recID, value) in method.iteritems():
        if value == min_value:
            mincount += 1
        if value == max_value:
            maxcount += 1
    write_message("Showing statistic for selected method")
    write_message("Method name: %s" % getName(rank_method_code))
    write_message("Short name: %s" % rank_method_code)
    write_message("Last run: %s" % get_lastupdated(rank_method_code))
    write_message("Number of records: %s" % len(method))
    write_message("Lowest value: %s - Number of records: %s" % (min_value, mincount))
    write_message("Highest value: %s - Number of records: %s" % (max_value, maxcount))
    write_message("Divided into 10 sets:")
    for i in range(1, 11):
        setcount = 0
        distinct_values = {}
        lower = -1.0 + ((float(max_value + 1) / 10)) * (i - 1)
        upper = -1.0 + ((float(max_value + 1) / 10)) * i
        for (recID, value) in method.iteritems():
            if value >= lower and value <= upper:
                setcount += 1
                distinct_values[value] = 1
        # Bug fix: the record count and the distinct-value count were
        # swapped in the message arguments.
        write_message("Set %s (%s-%s) %s Distinct values: %s" % (i, lower, upper, setcount, len(distinct_values)))
def check_method(rank_method_code):
    """Report whether the rank method has data and whether an update is due."""
    write_message("Checking rank method...")
    if len(fromDB(rank_method_code)) == 0:
        write_message("Rank method not yet executed, please run it to create the necessary data.")
    else:
        # any records modified since the method's last run?
        if len(add_recIDs_by_date(rank_method_code)) > 0:
            write_message("Records modified, update recommended")
        else:
            write_message("No records modified, update not necessary")
def load_config(method):
    """Read and return the ConfigParser for CFG_ETCDIR/bibrank/<method>.cfg.

    Logs an error message and re-raises when the file cannot be opened
    or parsed.
    """
    filename = CFG_ETCDIR + "/bibrank/" + method + ".cfg"
    config = ConfigParser.ConfigParser()
    try:
        config_file = open(filename)
        try:
            config.readfp(config_file)
        finally:
            # Bug fix: the file handle was previously never closed.
            config_file.close()
    except StandardError:
        write_message("Cannot find configuration file: %s" % filename,
                      sys.stderr)
        raise
    return config
def bibrank_engine(run):
    """Run the indexing task.
    Return 1 in case of success and 0 in case of failure.
    Determines the record range from the task options, then dispatches
    to the method's *_exec / *_repair_exec function by name.
    """
    startCreate = time.time()
    options["run"] = []
    options["run"].append(run)
    for rank_method_code in options["run"]:
        task_sleep_now_if_required(can_stop_too=True)
        cfg_name = getName(rank_method_code)
        write_message("Running rank method: %s." % cfg_name)
        config = load_config(rank_method_code)
        cfg_short = rank_method_code
        # function names are derived from the config, e.g. "citation_exec"
        cfg_function = config.get("rank_method", "function") + "_exec"
        cfg_repair_function = config.get("rank_method", "function") + "_repair_exec"
        cfg_name = getName(cfg_short)
        options["validset"] = get_valid_range(rank_method_code)
        # decide which records to (re)rank, in priority order:
        # collection > explicit ids > modified dates > since-last-run > all
        if task_get_option("collection"):
            l_of_colls = string.split(task_get_option("collection"), ", ")
            recIDs = perform_request_search(c=l_of_colls)
            recIDs_range = []
            for recID in recIDs:
                recIDs_range.append([recID, recID])
            options["recid_range"] = recIDs_range
        elif task_get_option("id"):
            options["recid_range"] = task_get_option("id")
        elif task_get_option("modified"):
            options["recid_range"] = add_recIDs_by_date(rank_method_code, task_get_option("modified"))
        elif task_get_option("last_updated"):
            options["recid_range"] = add_recIDs_by_date(rank_method_code)
        else:
            write_message("No records specified, updating all", verbose=2)
            min_id = run_sql("SELECT min(id) from bibrec")[0][0]
            max_id = run_sql("SELECT max(id) from bibrec")[0][0]
            options["recid_range"] = [[min_id, max_id]]
        if task_get_option("quick") == "no":
            write_message("Recalculate parameter not used, parameter ignored.", verbose=9)
        # dispatch on the requested command
        if task_get_option("cmd") == "del":
            del_recids(cfg_short, options["recid_range"])
        elif task_get_option("cmd") == "add":
            func_object = globals().get(cfg_function)
            func_object(rank_method_code, cfg_name, config)
        elif task_get_option("cmd") == "stat":
            rank_method_code_statistics(rank_method_code)
        elif task_get_option("cmd") == "check":
            check_method(rank_method_code)
        elif task_get_option("cmd") == "print-missing":
            func_object = globals().get(cfg_function)
            func_object(rank_method_code, cfg_name, config)
        elif task_get_option("cmd") == "repair":
            func_object = globals().get(cfg_repair_function)
            func_object()
        else:
            write_message("Invalid command found processing %s" % rank_method_code, sys.stderr)
            raise StandardError
    if task_get_option("verbose"):
        showtime((time.time() - startCreate))
    return 1
def get_valid_range(rank_method_code):
    """Return an intbitset of the recids belonging to the collections
    enabled for this rank method."""
    write_message("Getting records from collections enabled for rank method.", verbose=9)
    res = run_sql("SELECT collection.name FROM collection, collection_rnkMETHOD, rnkMETHOD WHERE collection.id=id_collection and id_rnkMETHOD=rnkMETHOD.id and rnkMETHOD.name=%s", (rank_method_code, ))
    l_of_colls = []
    for coll in res:
        l_of_colls.append(coll[0])
    if len(l_of_colls) > 0:
        recIDs = perform_request_search(c=l_of_colls)
    else:
        recIDs = []
    valid = intbitset()
    valid += recIDs
    return valid
def add_recIDs_by_date(rank_method_code, dates=""):
    """Return recID range from records modified between DATES[0] and DATES[1].
    If DATES is not set, then add records modified since the last run of
    the ranking method RANK_METHOD_CODE.
    """
    if not dates:
        try:
            dates = (get_lastupdated(rank_method_code), '')
        except Exception:
            # method never ran before: take everything
            dates = ("0000-00-00 00:00:00", '')
    if dates[0] is None:
        dates = ("0000-00-00 00:00:00", '')
    query = """SELECT b.id FROM bibrec AS b WHERE b.modification_date >= %s"""
    if dates[1]:
        query += " and b.modification_date <= %s"
    query += " ORDER BY b.id ASC"""
    if dates[1]:
        res = run_sql(query, (dates[0], dates[1]))
    else:
        res = run_sql(query, (dates[0], ))
    # collapse the sorted id list into [start, end] ranges
    alist = create_range_list([row[0] for row in res])
    if not alist:
        write_message("No new records added since last time method was run")
    return alist
def getName(rank_method_code, ln=CFG_SITE_LANG, type='ln'):
    """Returns the display name of the method if it exists.
    Falls back to the site language, then to the raw code itself."""
    try:
        rnkid = run_sql("SELECT id FROM rnkMETHOD where name=%s", (rank_method_code, ))
        if rnkid:
            rnkid = str(rnkid[0][0])
            res = run_sql("SELECT value FROM rnkMETHODNAME where type=%s and ln=%s and id_rnkMETHOD=%s", (type, ln, rnkid))
            if not res:
                # fall back to the site default language
                res = run_sql("SELECT value FROM rnkMETHODNAME WHERE ln=%s and id_rnkMETHOD=%s and type=%s", (CFG_SITE_LANG, rnkid, type))
            if not res:
                return rank_method_code
            return res[0][0]
        else:
            raise Exception
    except Exception:
        write_message("Cannot run rank method, either given code for method is wrong, or it has not been added using the webinterface.")
        raise Exception
def single_tag_rank_method(run):
    # BibTask entry point: delegates to the generic tag-based engine.
    return bibrank_engine(run)
def showtime(timeused):
    """Show time used for method"""
    write_message("Time used: %d second(s)." % timeused, verbose=9)
def citation(run):
    # BibTask entry point: delegates to the generic tag-based engine.
    return bibrank_engine(run)
# Hack to put index based sorting here, but this is very similar to tag
#based method and should re-use a lot of this code, so better to have here
#than separate
#
def index_term_count_exec(rank_method_code, name, config):
    """Create the rank method data for index-term-count weighting
    and store it with a fresh timestamp."""
    write_message("Recreating index weighting data")
    begin_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    # we must recalculate these every time for all records, since the
    # weighting of a record is determined by the index entries of _other_
    # records
    rnkset = calculate_index_term_count(config)
    intoDB(rnkset, begin_date, rank_method_code)
def calculate_index_term_count(config):
    """Calculate the weight of a record set based on the number of entries
    of a tag from the record in another index - useful for authority files.

    :param config: ConfigParser with an [index_term_count] section naming
        the index table ("index_table_name") and the tag whose values are
        looked up in it ("index_term_value_from_tag").
    :return: dict mapping recid -> number of hits of its tag value in the
        configured index.
    :raise Exception: if the configuration lacks the section.
    """
    records = []
    if config.has_section("index_term_count"):
        index = config.get("index_term_count", "index_table_name")
        tag = config.get("index_term_count", "index_term_value_from_tag")
        # check against possible SQL injection:
        dummy = get_table_update_time(index)
        tag = wash_table_column_name(tag)
    else:
        # Bug fix: the old code concatenated the ConfigParser object into
        # the message (a TypeError at raise time) and had an unreachable
        # return() after the raise.
        raise Exception("Config file %s does not have index_term_count section" % config)
    task_sleep_now_if_required(can_stop_too=True)
    write_message("......Processing all records")
    query = "SELECT id_bibrec, value FROM bib%sx, bibrec_bib%sx WHERE tag=%%s AND id_bibxxx=id" % \
            (tag[0:2], tag[0:2])  # we checked that tag is safe
    records = list(run_sql(query, (tag,)))
    write_message("Number of records found with the necessary tags: %s" % len(records))
    rnkset = {}
    for key, value in records:
        hits = 0
        if len(value):
            query = "SELECT hitlist from %s where term = %%s" % index  # we checked that index is a table
            row = run_sql(query, (value,))
            if row and row[0] and row[0][0]:
                # has to be prepared for corrupted data!
                try:
                    hits = len(intbitset(row[0][0]))
                except Exception:
                    # narrowed from a bare except: still tolerates corrupted
                    # hitlist blobs without aborting the whole run
                    hits = 0
        rnkset[key] = hits
    write_message("Number of records available in rank method: %s" % len(rnkset))
    return rnkset
def index_term_count(run):
    # BibTask entry point: delegates to the generic tag-based engine.
    return bibrank_engine(run)
| gpl-2.0 |
DNESS/cocos2d-js | templates/js-template-default/frameworks/runtime-src/proj.android/build_native.py | 43 | 5988 | #!/usr/bin/python
'''
build_native.py
This script will copy resources to assets and build native code with NDK.
'''
import os
import os.path
import re
import shutil
import sys
from optparse import OptionParser
def get_num_of_cpu():
    ''' Return the number of CPUs, used to accelerate the build by running
    multiple concurrent job processes using the -j option of ndk-build.

    Falls back to 1 if the count cannot be determined.
    '''
    # Bug fix: the old code depended on numpy.distutils (a heavy, often
    # absent package) on non-Windows hosts and silently degraded to a
    # single-job build; multiprocessing.cpu_count() is stdlib and covers
    # all platforms (it reads NUMBER_OF_PROCESSORS on Windows itself).
    try:
        import multiprocessing
        return multiprocessing.cpu_count()
    except Exception:
        print("Can't detect the number of CPUs, use default 1 cpu")
        return 1
def check_environment_variables():
    ''' Return the NDK_ROOT environment variable, which is required for
    building; exits the process with status 1 when it is not set.
    '''
    try:
        NDK_ROOT = os.environ['NDK_ROOT']
    except KeyError:
        # Narrowed from `except Exception`: only a missing variable is
        # expected here, anything else should surface as a real error.
        print("NDK_ROOT not defined. Please define NDK_ROOT in your environment")
        sys.exit(1)
    return NDK_ROOT
def select_toolchain_version(ndk_root):
    '''Return the NDK_TOOLCHAIN_VERSION ("4.8" or "4.9") appropriate for
    the NDK installed at ndk_root, parsed from its RELEASE.TXT file.

    NDK r10c and newer get "4.9"; anything older, or an unparseable or
    missing RELEASE.TXT, falls back to "4.8".
    '''
    # Bug fix: this function used re.match() but the module never imported
    # re, so every call raised NameError (masked by the bare except and
    # always reported as a parse failure). `import re` is now at file top.
    ret_version = "4.8"
    version_file_path = os.path.join(ndk_root, "RELEASE.TXT")
    try:
        version_file = open(version_file_path)
        try:
            lines = version_file.readlines()
        finally:
            # close the handle (it used to leak)
            version_file.close()
        version_num = None
        version_char = None
        # release lines look like e.g. "r10e (64-bit)"
        pattern = r'^[a-zA-Z]+(\d+)(\w)'
        for line in lines:
            match = re.match(pattern, line.lstrip())
            if match:
                version_num = int(match.group(1))
                version_char = match.group(2)
                break
        if version_num is None:
            print("Parse NDK version from file %s failed." % version_file_path)
        else:
            version_char = version_char.lower()
            # plain string comparison replaces the Python-2-only cmp()
            if version_num > 10 or (version_num == 10 and version_char >= 'c'):
                ret_version = "4.9"
    except Exception:
        print("Parse NDK version from file %s failed." % version_file_path)
    print("NDK_TOOLCHAIN_VERSION: %s" % ret_version)
    if ret_version == "4.8":
        print(
            "Your application may crash when using c++ 11 regular expression with NDK_TOOLCHAIN_VERSION %s" % ret_version)
    return ret_version
def do_build(cocos_root, ndk_root, app_android_root, ndk_build_param, sdk_root, build_mode):
    """Invoke ndk-build for the project at app_android_root.

    NDK_DEBUG is 1 for a debug build, 0 otherwise; raises if the build
    command exits with a non-zero status.
    """
    ndk_path = os.path.join(ndk_root, "ndk-build")
    ndk_toolchain_version = select_toolchain_version(ndk_root)
    # windows should use ";" to seperate module paths
    sep = ';' if sys.platform == 'win32' else ':'
    module_dirs = '%s/..' % cocos_root + sep + cocos_root + sep + \
        '%s/external' % cocos_root + sep + '%s/cocos' % cocos_root
    ndk_module_path = 'NDK_MODULE_PATH=%s NDK_TOOLCHAIN_VERSION=%s' % (
        module_dirs, ndk_toolchain_version)
    num_of_cpu = get_num_of_cpu()
    base_args = (ndk_path, num_of_cpu, app_android_root, build_mode == 'debug')
    if ndk_build_param is None:
        command = '%s -j%d -C %s NDK_DEBUG=%d %s' % (base_args + (ndk_module_path,))
    else:
        command = '%s -j%d -C %s NDK_DEBUG=%d %s %s' % (base_args + (ndk_build_param, ndk_module_path))
    print(command)
    if os.system(command) != 0:
        raise Exception("Build dynamic library for project [ " + app_android_root + " ] fails!")
def copy_files(src, dst):
    """Recursively copy the contents of src into dst.

    Regular files whose names start with "." or end with ".gz" are
    skipped; subdirectories are created in dst and descended into.
    """
    for entry in os.listdir(src):
        full_path = os.path.join(src, entry)
        if os.path.isfile(full_path):
            # Android can not package the file that ends with ".gz"
            if not entry.startswith('.') and not entry.endswith('.gz'):
                shutil.copy(full_path, dst)
        elif os.path.isdir(full_path):
            target = os.path.join(dst, entry)
            os.mkdir(target)
            copy_files(full_path, target)
def copy_resources(app_android_root):
    """Rebuild the Android assets directory from the project's res/, src/
    and JS-bindings script directories plus main.js and project.json."""
    # remove app_android_root/assets if it exists
    assets_dir = os.path.join(app_android_root, "assets")
    if os.path.isdir(assets_dir):
        shutil.rmtree(assets_dir)
    # copy resources
    os.mkdir(assets_dir)
    assets_res_dir = assets_dir + "/res";
    assets_scripts_dir = assets_dir + "/src";
    assets_jsb_dir = assets_dir + "/script";
    os.mkdir(assets_res_dir);
    os.mkdir(assets_scripts_dir);
    os.mkdir(assets_jsb_dir);
    # top-level entry files
    shutil.copy(os.path.join(app_android_root, "../../../main.js"), assets_dir)
    shutil.copy(os.path.join(app_android_root, "../../../project.json"), assets_dir)
    resources_dir = os.path.join(app_android_root, "../../../res")
    copy_files(resources_dir, assets_res_dir)
    resources_dir = os.path.join(app_android_root, "../../../src")
    copy_files(resources_dir, assets_scripts_dir)
    resources_dir = os.path.join(app_android_root, "../../../frameworks/js-bindings/bindings/script")
    copy_files(resources_dir, assets_jsb_dir)
def build(targets,ndk_build_param,build_mode):
    """Top-level build entry: resolve paths, copy assets, run ndk-build.
    build_mode is normalised to 'debug' unless it is exactly 'release'."""
    ndk_root = check_environment_variables()
    sdk_root = None
    project_root = os.path.dirname(os.path.realpath(__file__))
    cocos_root = os.path.join(project_root, "..", "..", "..", "frameworks/js-bindings/cocos2d-x")
    print cocos_root
    if build_mode is None:
        build_mode = 'debug'
    elif build_mode != 'release':
        build_mode = 'debug'
    copy_resources(project_root)
    do_build(cocos_root, ndk_root, project_root,ndk_build_param,sdk_root,build_mode)
# -------------- main --------------
if __name__ == '__main__':
    # Command-line front end: -n passes extra ndk-build parameters,
    # -b chooses the build mode ('debug' or 'release').
    parser = OptionParser()
    parser.add_option("-n", "--ndk", dest="ndk_build_param",
                        help='Parameter for ndk-build')
    parser.add_option("-b", "--build", dest="build_mode",
                        help='The build mode for NDK project, debug or release')
    (opts, args) = parser.parse_args()
    try:
        build(args, opts.ndk_build_param,opts.build_mode)
    except Exception as e:
        # report the failure and exit non-zero so CI notices
        print e
        sys.exit(1)
| mit |
sparkslabs/kamaelia_ | Sketches/TG/old_shard/cshard/cshardtest.py | 3 | 3556 | # -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cshard import *
"""
Code generation testing
"""
# NOTE(review): Python 2 script (print statements) — run under python2.
# This is a visual smoke-test: each helper from cshard is exercised with a
# range of argument combinations and the generated source is printed for
# manual inspection; there are no automated assertions.

#~ # importmodules
# Exercise import-line generation with plain names, list values and a tuple.
imps = importmodules('lala', 'doo', 'ming', wheee = ['huphup', 'pop', 'pip'], nanoo = ('noom', ))
for line in imps:
    print line,

#~ # setindent
# Re-indent the generated import lines at levels 0, 1 and 2, then back down
# (level -1) and once more with the default level.
impsind = indent(imps, level = 0)
for line in impsind:
    print line,
impsind = indent(imps, level = 1)
for line in impsind:
    print line,
impsind = indent(imps, level = 2)
for line in impsind:
    print line,
impsind = indent(impsind, level = -1)
for line in impsind:
    print line,
impsind = indent(impsind)
for line in impsind:
    print line,

#~ # makeclass
# Class headers with no bases, an empty base list, one base, and two bases.
for line in makeclass("CMagnaDoodle"):
    print line,
for line in makeclass("CMagnaDoodle", []):
    print line,
for line in makeclass("CMagnaDoodle", ['Axon.Component.component']):
    print line,
for line in makeclass("CMagnaDoodle", ['Axon.Component.component', 'dummy']):
    print line,

#~ # makedoc
# Docstring generation for single-line and multi-line text.
doc = "one line doc"
docs = makedoc(doc)
for line in docs:
    print line,
doc = "manymany\nline\ndoc\ndoo doo doo"
docs = makedoc(doc)
for line in docs:
    print line,

#~ # makeboxes
# Inbox/Outbox declaration generation across flag and keyword combinations.
for line in makeboxes():
    print line,
print
for line in makeboxes(True, False):
    print line,
print
for line in makeboxes(inboxes = False, default = True):
    print line,
print
for line in makeboxes(inboxes = True, default = False, doo = "useless box", dum = "twin"):
    print line,
print
for line in makeboxes(True, True, doo = "useless box", dum = "twin"):
    print line,
print

#~ # getshard
# Extract source "shards" from CDrawing functions at various indent levels.
from CDrawing import *
for line in getshard(drawBG):
    print line,
print
for line in getshard(drawBG, 2):
    print line,
print
for line in getshard(drawBG, 0):
    print line,
print
for line in getshard(blitToSurface, 3):
    print line,
print
for line in getshard(displaySetup):
    print line,
print

#~ # annotateshard
# Wrap extracted shards in named delimiter comments, varying name,
# indent level and delimiter character.
from CDrawing import *
for line in annotateshard(getshard(drawBG), "drawBG"):
    print line,
print
for line in annotateshard(getshard(drawBG, 2), 'pop', 2):
    print line,
print
for line in annotateshard(getshard(drawBG, 0), 'drawBG', 0, delimchar='='):
    print line,
print
for line in annotateshard(getshard(blitToSurface, 3), 'bts', delimchar='e'):
    print line,
print
for line in annotateshard(getshard(displaySetup), ""):
    print line,
print

#~ # makearglist
# Argument-list rendering for mixed positional/keyword inputs, including
# None and empty containers, with and without *args / **kwargs names.
args = ['la', 'hmm']
kwargs = {'pop':'True', 'num':'1'}
print makearglist([], kwargs)
print makearglist(args, None)
print makearglist(args, kwargs, exarg = 'args')
print makearglist(None, kwargs, exkwarg = 'kwargs')
print makearglist(args, {}, exarg = 'args', exkwarg = 'kwargs')
print

#~ # makefunction (incomplete...)
args = ['la', 'hmm']
kwargs = {'pop':'True', 'num':'1'}
name = 'fun'
print makefunction(name, args, kwargs, exkwarg = 'kwargs')
| apache-2.0 |
gaqzi/ansible-modules-extras | web_infrastructure/jira.py | 8 | 10111 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Steve Smith <ssmith@atlassian.com>
# Atlassian open-source approval reference OSR-76.
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
module: jira
version_added: "1.6"
short_description: create and modify issues in a JIRA instance
description:
- Create and modify issues in a JIRA instance.
options:
uri:
required: true
description:
- Base URI for the JIRA instance
operation:
required: true
aliases: [ command ]
choices: [ create, comment, edit, fetch, transition ]
description:
- The operation to perform.
username:
required: true
description:
- The username to log-in with.
password:
required: true
description:
- The password to log-in with.
project:
aliases: [ prj ]
required: false
description:
- The project for this operation. Required for issue creation.
summary:
required: false
description:
- The issue summary, where appropriate.
description:
required: false
description:
- The issue description, where appropriate.
issuetype:
required: false
description:
- The issue type, for issue creation.
issue:
required: false
description:
- An existing issue key to operate on.
comment:
required: false
description:
- The comment text to add.
status:
required: false
description:
- The desired status; only relevant for the transition operation.
assignee:
required: false
description:
- Sets the assignee on create or transition operations. Note not all transitions will allow this.
fields:
required: false
description:
- This is a free-form data structure that can contain arbitrary data. This is passed directly to the JIRA REST API (possibly after merging with other required data, as when passed to create). See examples for more information, and the JIRA REST API for the structure required for various fields.
notes:
- "Currently this only works with basic-auth."
author: '"Steve Smith (@tarka)" <ssmith@atlassian.com>'
"""
EXAMPLES = """
# Create a new issue and add a comment to it:
- name: Create an issue
jira: uri={{server}} username={{user}} password={{pass}}
project=ANS operation=create
summary="Example Issue" description="Created using Ansible" issuetype=Task
register: issue
- name: Comment on issue
jira: uri={{server}} username={{user}} password={{pass}}
issue={{issue.meta.key}} operation=comment
comment="A comment added by Ansible"
# Assign an existing issue using edit
- name: Assign an issue using free-form fields
jira: uri={{server}} username={{user}} password={{pass}}
issue={{issue.meta.key}} operation=edit
assignee=ssmith
# Create an issue with an existing assignee
- name: Create an assigned issue
jira: uri={{server}} username={{user}} password={{pass}}
project=ANS operation=create
summary="Assigned issue" description="Created and assigned using Ansible"
issuetype=Task assignee=ssmith
# Edit an issue using free-form fields
- name: Set the labels on an issue using free-form fields
jira: uri={{server}} username={{user}} password={{pass}}
issue={{issue.meta.key}} operation=edit
args: { fields: {labels: ["autocreated", "ansible"]}}
- name: Set the labels on an issue, YAML version
jira: uri={{server}} username={{user}} password={{pass}}
issue={{issue.meta.key}} operation=edit
args:
fields:
labels:
- "autocreated"
- "ansible"
- "yaml"
# Retrieve metadata for an issue and use it to create an account
- name: Get an issue
jira: uri={{server}} username={{user}} password={{pass}}
project=ANS operation=fetch issue="ANS-63"
register: issue
- name: Create a unix account for the reporter
sudo: true
user: name="{{issue.meta.fields.creator.name}}" comment="{{issue.meta.fields.creator.displayName}}"
# Transition an issue by target status
- name: Close the issue
jira: uri={{server}} username={{user}} password={{pass}}
issue={{issue.meta.key}} operation=transition status="Done"
"""
import json
import base64
def request(url, user, passwd, data=None, method=None):
    """Perform an authenticated JSON request against the JIRA REST API.

    Returns the decoded JSON body, or {} when the response body is empty.
    Any non-success HTTP status aborts the module via module.fail_json().
    """
    if data:
        data = json.dumps(data)

    # NOTE: fetch_url uses a password manager, which follows the
    # standard request-then-challenge basic-auth semantics. However as
    # JIRA allows some unauthorised operations it doesn't necessarily
    # send the challenge, so the request occurs as the anonymous user,
    # resulting in unexpected results. To work around this we manually
    # inject the basic-auth header up-front to ensure that JIRA treats
    # the requests as authorized for this user.
    auth = base64.encodestring('%s:%s' % (user, passwd)).replace('\n', '')
    response, info = fetch_url(module, url, data=data, method=method,
                               headers={'Content-Type':'application/json',
                                        'Authorization':"Basic %s" % auth})

    # 201 (Created) is what JIRA returns for a successful issue-creation
    # POST; without it every `create` operation was reported as a failure.
    if info['status'] not in (200, 201, 204):
        module.fail_json(msg=info['msg'])

    body = response.read()

    if body:
        return json.loads(body)
    else:
        return {}
def post(url, user, passwd, data):
    # Convenience wrapper: JSON POST (create / comment / transition).
    return request(url, user, passwd, data=data, method='POST')

def put(url, user, passwd, data):
    # Convenience wrapper: JSON PUT (issue edits).
    return request(url, user, passwd, data=data, method='PUT')

def get(url, user, passwd):
    # Convenience wrapper: authenticated GET returning decoded JSON.
    return request(url, user, passwd)
def create(restbase, user, passwd, params):
    """Create a new issue and return JIRA's JSON response (key, id, ...)."""
    # Start from the minimal field set JIRA requires for creation, then let
    # any caller-supplied free-form fields extend or override it.
    issue_fields = {
        'project': {'key': params['project']},
        'summary': params['summary'],
        'description': params['description'],
        'issuetype': {'name': params['issuetype']},
    }
    extra = params['fields']
    if extra:
        issue_fields.update(extra)

    return post(restbase + '/issue/', user, passwd, {'fields': issue_fields})
def comment(restbase, user, passwd, params):
    """Add a comment to an existing issue via its comment collection."""
    target = '%s/issue/%s/comment' % (restbase, params['issue'])
    return post(target, user, passwd, {'body': params['comment']})
def edit(restbase, user, passwd, params):
    """Apply the caller-supplied free-form field updates to an issue."""
    target = '%s/issue/%s' % (restbase, params['issue'])
    return put(target, user, passwd, {'fields': params['fields']})
def fetch(restbase, user, passwd, params):
    """Retrieve the full JSON representation of a single issue."""
    return get('%s/issue/%s' % (restbase, params['issue']), user, passwd)
def transition(restbase, user, passwd, params):
    """Move an issue to the named target status."""
    target = params['status']
    url = restbase + '/issue/' + params['issue'] + "/transitions"

    # JIRA addresses transitions by an opaque id, so first map the
    # human-readable status name onto its id.
    tid = next((t['id'] for t in get(url, user, passwd)['transitions']
                if t['name'] == target), None)
    if not tid:
        raise ValueError("Failed find valid transition for '%s'" % target)

    # Perform it
    data = {'transition': {"id": tid},
            'fields': params['fields']}
    return post(url, user, passwd, data)
# Some parameters are required depending on the operation:
# maps each operation name to the module params it cannot run without
# (validated in main() before dispatch).
OP_REQUIRED = dict(create=['project', 'issuetype', 'summary', 'description'],
                   comment=['issue', 'comment'],
                   edit=[],
                   fetch=['issue'],
                   transition=['status'])
def main():
    """Module entry point: parse/validate arguments and dispatch by name."""

    global module
    # NOTE(review): `module` is kept global so request() can call fail_json.
    module = AnsibleModule(
        argument_spec=dict(
            uri=dict(required=True),
            operation=dict(choices=['create', 'comment', 'edit', 'fetch', 'transition'],
                           aliases=['command'], required=True),
            username=dict(required=True),
            password=dict(required=True),
            project=dict(),
            summary=dict(),
            description=dict(),
            issuetype=dict(),
            issue=dict(aliases=['ticket']),
            comment=dict(),
            status=dict(),
            assignee=dict(),
            fields=dict(default={})
        ),
        supports_check_mode=False
    )

    op = module.params['operation']

    # Check we have the necessary per-operation parameters
    missing = []
    for parm in OP_REQUIRED[op]:
        if not module.params[parm]:
            missing.append(parm)
    if missing:
        module.fail_json(msg="Operation %s require the following missing parameters: %s" % (op, ",".join(missing)))

    # Handle rest of parameters
    uri = module.params['uri']
    user = module.params['username']
    passwd = module.params['password']
    if module.params['assignee']:
        # Assignee is folded into the free-form fields dict so every
        # operation that forwards fields picks it up.
        module.params['fields']['assignee'] = { 'name': module.params['assignee'] }

    if not uri.endswith('/'):
        uri = uri+'/'
    restbase = uri + 'rest/api/2'

    # Dispatch
    try:

        # Lookup the corresponding method for this operation. This is
        # safe as the AnsibleModule should remove any unknown operations.
        thismod = sys.modules[__name__]
        method = getattr(thismod, op)

        ret = method(restbase, user, passwd, module.params)

    except Exception, e:
        return module.fail_json(msg=e.message)

    module.exit_json(changed=True, meta=ret)
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
| gpl-3.0 |
bertucho/moviestalk | venv/Lib/site-packages/gunicorn/app/djangoapp.py | 24 | 5026 | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import os
import sys
from gunicorn.app.base import Application
from gunicorn import util
def is_setting_mod(path):
    """Return True if *path* contains a Django settings module (.py or .pyc)."""
    candidates = (os.path.join(path, "settings.py"),
                  os.path.join(path, "settings.pyc"))
    return any(os.path.isfile(c) for c in candidates)
def find_settings_module(path):
    """Locate a Django settings module at or below *path*.

    Returns (project_path, settings_name); project_path is None when no
    settings module could be found.
    """
    path = os.path.abspath(path)
    project_path = None
    settings_name = "settings"

    if os.path.isfile(path):
        # A settings file was named directly: its directory is the project
        # and its basename (minus extension) is the settings module name.
        project_path = os.path.dirname(path)
        settings_name = os.path.splitext(os.path.basename(path))[0]
    elif os.path.isdir(path):
        if is_setting_mod(path):
            project_path = path
        else:
            # Scan immediate children and take the first one (in listdir
            # order) that holds a settings module.
            for entry in os.listdir(path):
                if entry in ('..', '.'):
                    continue
                candidate = os.path.join(path, entry)
                if is_setting_mod(candidate):
                    project_path = candidate
                    break

    return project_path, settings_name
def make_default_env(cfg):
    """Populate DJANGO_SETTINGS_MODULE and sys.path from the gunicorn config.

    Falls back to scanning the current working directory for a Django
    project when no settings module is configured anywhere.
    Raises RuntimeError if no project can be located.
    """
    if cfg.django_settings:
        os.environ['DJANGO_SETTINGS_MODULE'] = cfg.django_settings

    if cfg.pythonpath:
        # cfg.pythonpath is a comma-separated list of directories.
        for path in cfg.pythonpath.split(","):
            # Bug fix: previously abspath(cfg.pythonpath) was used here, so
            # only the raw comma-joined string was ever added to sys.path.
            pythonpath = os.path.abspath(path)
            if pythonpath not in sys.path:
                sys.path.insert(0, pythonpath)

    try:
        os.environ['DJANGO_SETTINGS_MODULE']
    except KeyError:
        # not settings env set, try to build one from the cwd.
        cwd = util.getcwd()
        project_path, settings_name = find_settings_module(cwd)
        if not project_path:
            raise RuntimeError("django project not found")

        pythonpath, project_name = os.path.split(project_path)
        os.environ['DJANGO_SETTINGS_MODULE'] = "%s.%s" % (project_name,
                settings_name)
        if pythonpath not in sys.path:
            sys.path.insert(0, pythonpath)

        if project_path not in sys.path:
            sys.path.insert(0, project_path)
class DjangoApplication(Application):
    """Gunicorn Application that serves a Django project.

    Accepts either a dotted settings module path ("myproject.settings")
    or a filesystem path to the project as its single CLI argument.
    """

    def init(self, parser, opts, args):
        if args:
            if ("." in args[0] and not (os.path.isfile(args[0])
                    or os.path.isdir(args[0]))):
                # Dotted module path such as "myproject.settings".
                self.cfg.set("django_settings", args[0])
            else:
                # Filesystem path: locate the settings module under it.
                project_path, settings_name = find_settings_module(
                    os.path.abspath(args[0]))
                # Bug fix: fail before touching sys.path — previously a
                # missing project inserted None into sys.path and then raised.
                if not project_path:
                    raise RuntimeError("django project not found")
                if project_path not in sys.path:
                    sys.path.insert(0, project_path)

                pythonpath, project_name = os.path.split(project_path)
                self.cfg.set("django_settings", "%s.%s" % (project_name,
                        settings_name))
                self.cfg.set("pythonpath", pythonpath)

    def load(self):
        # chdir to the configured path before loading,
        # default is the current dir
        os.chdir(self.cfg.chdir)

        # set settings
        make_default_env(self.cfg)

        # load wsgi application and return it.
        mod = util.import_module("gunicorn.app.django_wsgi")
        return mod.make_wsgi_application()
class DjangoApplicationCommand(Application):
    """Application wrapper driven by pre-parsed options instead of argv.

    Used when gunicorn is launched from a Django management command:
    `options` is a plain dict of settings and `admin_media_path` points
    at the static admin media to serve.
    """

    def __init__(self, options, admin_media_path):
        self.usage = None
        self.prog = None
        self.cfg = None
        self.config_file = options.get("config") or ""
        self.options = options
        self.admin_media_path = admin_media_path
        self.callable = None
        self.project_path = None
        # NOTE(review): presumably populates self.cfg from config_file —
        # defined on the Application base class; confirm there.
        self.do_load_config()

    def init(self, *args):
        if 'settings' in self.options:
            # Rename Django's 'settings' key to gunicorn's setting name.
            self.options['django_settings'] = self.options.pop('settings')

        # Keep only options that are real gunicorn settings with a value.
        cfg = {}
        for k, v in self.options.items():
            if k.lower() in self.cfg.settings and v is not None:
                cfg[k.lower()] = v
        return cfg

    def load(self):
        # chdir to the configured path before loading,
        # default is the current dir
        os.chdir(self.cfg.chdir)

        # set settings
        make_default_env(self.cfg)

        # load wsgi application and return it.
        mod = util.import_module("gunicorn.app.django_wsgi")
        return mod.make_command_wsgi_application(self.admin_media_path)
def run():
    """\
    The ``gunicorn_django`` command line runner for launching Django
    applications.
    """
    # Deprecated shim kept for backwards compatibility: warns, then
    # delegates to the regular DjangoApplication entry point.
    util.warn("""This command is deprecated.
    You should now run your application with the WSGI interface
    installed with your project. Ex.:
        gunicorn myproject.wsgi:application
    See https://docs.djangoproject.com/en/1.4/howto/deployment/wsgi/gunicorn/
    for more info.""")

    # Local import avoids a circular import at module load time.
    from gunicorn.app.djangoapp import DjangoApplication
    DjangoApplication("%(prog)s [OPTIONS] [SETTINGS_PATH]").run()
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.